// sparse_matrix_multiplication_utility.h
//    |  /           |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED )
#define KRATOS_SPARSE_MATRIX_MULTIPLICATION_UTILITY_H_INCLUDED
// System includes
#include <vector>
#include <math.h>
#include <algorithm>
#include <numeric>
#ifdef _OPENMP
#include <omp.h>
#endif
// External includes
#include "amgcl/value_type/interface.hpp"
// Project includes
#include "includes/define.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class SparseMatrixMultiplicationUtility
* @ingroup ContactStructuralMechanicsApplication
* @brief An utility to multiply sparse matrix in Ublas
* @details Taken and adapted for ublas from external_libraries/amgcl/detail/spgemm.hpp by Denis Demidov <dennis.demidov@gmail.com>
* @todo Remove as soon as we do not depend of Ublas anymore...
* @author Vicente Mataix Ferrandiz
*/
class SparseMatrixMultiplicationUtility
{
public:
///@name Type Definitions
///@{
/// Pointer definition of SparseMatrixMultiplicationUtility
KRATOS_CLASS_POINTER_DEFINITION( SparseMatrixMultiplicationUtility );
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// The signed index type
typedef std::ptrdiff_t SignedIndexType;
/// A vector of indexes
typedef DenseVector<IndexType> IndexVectorType;
/// A vector of indexes (signed)
typedef DenseVector<SignedIndexType> SignedIndexVectorType;
///@}
///@name Life Cycle
///@{
/// Default constructor
SparseMatrixMultiplicationUtility(){};
/// Desctructor
virtual ~SparseMatrixMultiplicationUtility()= default;;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Metafunction that returns value type of a matrix or a vector type.
template <class T, class Enable = void>
struct value_type {
    using type = typename T::value_type;
};
/**
 * @brief Matrix-matrix product C = A·B
 * @details Dispatches between the two available kernels depending on the maximum
 * number of OpenMP threads: Saad's algorithm for 16 threads or fewer, the
 * row-merge (RMerge) algorithm otherwise.
 * @param rA The first matrix
 * @param rB The second matrix
 * @param rC The resulting matrix
 */
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplication(
    const AMatrix& rA,
    const BMatrix& rB,
    CMatrix& rC
    )
{
#ifdef _OPENMP
    const int number_of_threads = omp_get_max_threads();
#else
    const int number_of_threads = 1;
#endif
    // RMerge scales better for high thread counts; Saad wins otherwise
    if (number_of_threads <= 16) {
        MatrixMultiplicationSaad(rA, rB, rC);
    } else {
        MatrixMultiplicationRMerge(rA, rB, rC);
    }
}
/**
 * @brief Matrix-matrix product C = A·B using an OpenMP-enabled modification of the classic algorithm from Saad
 * @details Two passes over the CSR structure: a symbolic pass counts the non-zeros of every row of C, a numeric pass fills column indices and values. Selected by MatrixMultiplication() for the lower thread counts (16 or fewer with the current dispatch). Reference: Saad, Yousef. Iterative methods for sparse linear systems. Siam, 2003.
 * @param A The first matrix to multiply (compressed row storage: index1_data = row pointers, index2_data = column indices, value_data = values)
 * @param B The second matrix to multiply
 * @param C The resulting matrix
 */
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplicationSaad(
const AMatrix& A,
const BMatrix& B,
CMatrix& C
)
{
typedef typename value_type<CMatrix>::type ValueType;
// Auxiliar sizes
const SizeType nrows = A.size1();
const SizeType ncols = B.size2();
// Exiting just in case of empty matrix
if ((nrows == 0) || (ncols == 0))
return void();
// Get access to A, B and C data (raw CSR arrays)
// NOTE(review): values are read as double regardless of ValueType -- presumably all callers use double-valued matrices; confirm
const IndexType* index1_a = A.index1_data().begin();
const IndexType* index2_a = A.index2_data().begin();
const double* values_a = A.value_data().begin();
const IndexType* index1_b = B.index1_data().begin();
const IndexType* index2_b = B.index2_data().begin();
const double* values_b = B.value_data().begin();
// Row pointer of C: holds per-row counts first, turned into CSR offsets by the partial_sum below
IndexType* c_ptr = new IndexType[nrows + 1];
c_ptr[0] = 0;
// Symbolic pass: count the distinct column indices of every row of C
#pragma omp parallel
{
// marker[col] remembers the last row that touched this column (-1 = untouched), so each column is counted once per row
SignedIndexVectorType marker(ncols);
for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill)
marker[i_fill] = -1;
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
IndexType C_cols = 0;
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
const IndexType row_begin_b = index1_b[ca];
const IndexType row_end_b = index1_b[ca+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
if (marker[cb] != ia) {
marker[cb] = ia;
++C_cols;
}
}
}
c_ptr[ia + 1] = C_cols;
}
}
// We initialize the sparse matrix: turn the per-row counts into CSR row offsets
std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr);
const SizeType nonzero_values = c_ptr[nrows];
IndexType* aux_index2_c = new IndexType[nonzero_values];
ValueType* aux_val_c = new ValueType[nonzero_values];
// Numeric pass: accumulate the va*vb contributions into the rows of C
#pragma omp parallel
{
// marker[col] now stores the position of that column inside the current row of C (values below row_beg are stale entries of previous rows)
SignedIndexVectorType marker(ncols);
for (int i_fill = 0; i_fill < static_cast<int>(ncols); ++i_fill)
marker[i_fill] = -1;
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
const IndexType row_beg = c_ptr[ia];
IndexType row_end = row_beg;
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
const ValueType va = values_a[ja];
const IndexType row_begin_b = index1_b[ca];
const IndexType row_end_b = index1_b[ca+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
const ValueType vb = values_b[jb];
// A marker below row_beg belongs to a previous row: the column appears for the first time in the current row
if (marker[cb] < static_cast<SignedIndexType>(row_beg)) {
marker[cb] = row_end;
aux_index2_c[row_end] = cb;
aux_val_c[row_end] = va * vb;
++row_end;
} else {
aux_val_c[marker[cb]] += va * vb;
}
}
}
}
}
// We reorder the rows (column indices were produced in visit order, not sorted)
SortRows(c_ptr, nrows, ncols, aux_index2_c, aux_val_c);
// We fill the matrix
CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c);
// Release memory
delete[] c_ptr;
delete[] aux_index2_c;
delete[] aux_val_c;
}
/**
 * @brief Row-merge algorithm from Rupp et al.
 * @details The algorithm requires less memory and shows much better scalability than the classic one; it is selected by MatrixMultiplication() for the higher thread counts. Each row of C is built by pairwise merging of the B rows selected by the corresponding row of A, using thread-private scratch buffers sized by the widest possible row.
 * @param A The first matrix to multiply
 * @param B The second matrix to multiply
 * @param C The resulting matrix
 */
template <class AMatrix, class BMatrix, class CMatrix>
static void MatrixMultiplicationRMerge(
const AMatrix &A,
const BMatrix &B,
CMatrix &C
)
{
typedef typename value_type<CMatrix>::type ValueType;
// Auxiliar sizes
const SizeType nrows = A.size1();
const SizeType ncols = B.size2();
// Exiting just in case of empty matrix
if ((nrows == 0) || (ncols == 0))
return void();
// Get access to A and B data (raw CSR arrays)
const IndexType* index1_a = A.index1_data().begin();
const IndexType* index2_a = A.index2_data().begin();
const double* values_a = A.value_data().begin();
const IndexType* index1_b = B.index1_data().begin();
const IndexType* index2_b = B.index2_data().begin();
const double* values_b = B.value_data().begin();
// Upper bound of the width of any row of C: sum of the widths of the B rows referenced by each A row
IndexType max_row_width = 0;
#pragma omp parallel
{
IndexType my_max = 0;
#pragma omp for
for(int i = 0; i < static_cast<int>(nrows); ++i) {
const IndexType row_beg = index1_a[i];
const IndexType row_end = index1_a[i+1];
IndexType row_width = 0;
for(IndexType j = row_beg; j < row_end; ++j) {
const IndexType a_col = index2_a[j];
row_width += index1_b[a_col + 1] - index1_b[a_col];
}
my_max = std::max(my_max, row_width);
}
// Reduce the thread-local maxima into the global one
#pragma omp critical
max_row_width = std::max(max_row_width, my_max);
}
#ifdef _OPENMP
const int nthreads = omp_get_max_threads();
#else
const int nthreads = 1;
#endif
// Thread-private scratch: three column buffers and two value buffers per thread (layout expected by ProdRowWidth/ProdRow)
std::vector< std::vector<IndexType> > tmp_col(nthreads);
std::vector< std::vector<ValueType> > tmp_val(nthreads);
for(int i = 0; i < nthreads; ++i) {
tmp_col[i].resize(3 * max_row_width);
tmp_val[i].resize(2 * max_row_width);
}
// We create the c_ptr auxiliar variable (row pointer of C: counts first, CSR offsets after the partial_sum)
IndexType* c_ptr = new IndexType[nrows + 1];
c_ptr[0] = 0;
// Symbolic pass: compute the width of every row of C
#pragma omp parallel
{
#ifdef _OPENMP
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
IndexType* t_col = &tmp_col[tid][0];
#pragma omp for
for(int i = 0; i < static_cast<int>(nrows); ++i) {
const IndexType row_beg = index1_a[i];
const IndexType row_end = index1_a[i+1];
c_ptr[i+1] = ProdRowWidth( index2_a + row_beg, index2_a + row_end, index1_b, index2_b, t_col, t_col + max_row_width, t_col + 2 * max_row_width );
}
}
// We initialize the sparse matrix
std::partial_sum(c_ptr, c_ptr + nrows + 1, c_ptr);
const SizeType nonzero_values = c_ptr[nrows];
IndexType* aux_index2_c = new IndexType[nonzero_values];
ValueType* aux_val_c = new ValueType[nonzero_values];
// Numeric pass: merge the scaled rows of B directly into the storage of C
#pragma omp parallel
{
#ifdef _OPENMP
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
IndexType* t_col = tmp_col[tid].data();
ValueType *t_val = tmp_val[tid].data();
#pragma omp for
for(int i = 0; i < static_cast<int>(nrows); ++i) {
const IndexType row_beg = index1_a[i];
const IndexType row_end = index1_a[i+1];
ProdRow(index2_a + row_beg, index2_a + row_end, values_a + row_beg,
index1_b, index2_b, values_b, aux_index2_c + c_ptr[i], aux_val_c + c_ptr[i], t_col, t_val, t_col + max_row_width, t_val + max_row_width );
}
}
// We fill the matrix. No SortRows pass here: the pairwise merges keep the column indices ordered (see MergeRows)
CreateSolutionMatrix(C, nrows, ncols, c_ptr, aux_index2_c, aux_val_c);
// Release memory
delete[] c_ptr;
delete[] aux_index2_c;
delete[] aux_val_c;
}
/**
 * @brief In-place sum of two sparse matrices: A = A + Factor*B
 * @details Two passes over the CSR structure: a symbolic pass counting the non-zeros of every row of the result, and a numeric pass filling column indices and values. Both matrices must have identical dimensions.
 * @param A The first matrix to sum (overwritten with the result)
 * @param B The second matrix to sum
 * @param Factor The scalar coefficient applied to the values of B
 */
template <class AMatrix, class BMatrix>
static void MatrixAdd(
AMatrix& A,
const BMatrix& B,
const double Factor = 1.0
)
{
typedef typename value_type<AMatrix>::type ValueType;
// Auxiliar sizes
const SizeType nrows = A.size1();
const SizeType ncols = A.size2();
/* Some checks */
// Exiting just in case of empty matrix
if ((nrows == 0) || (ncols == 0))
return void();
KRATOS_ERROR_IF_NOT(nrows == B.size1()) << "The second matrix has a wrong number of rows" << std::endl;
KRATOS_ERROR_IF_NOT(ncols == B.size2()) << "The second matrix has a wrong number of columns" << std::endl;
// Get access to A and B data (raw CSR arrays)
const IndexType* index1_a = A.index1_data().begin();
const IndexType* index2_a = A.index2_data().begin();
const double* values_a = A.value_data().begin();
const IndexType* index1_b = B.index1_data().begin();
const IndexType* index2_b = B.index2_data().begin();
const double* values_b = B.value_data().begin();
// Row pointer of the result: per-row counts first, CSR offsets after the partial_sum below
IndexType* new_a_ptr = new IndexType[nrows + 1];
new_a_ptr[0] = 0;
// Symbolic pass: count the distinct columns of every row of A + B
#pragma omp parallel
{
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
// marker[col] flags the columns already seen in this row
// NOTE(review): the marker vector is allocated and reset once per row; hoisting it out of the loop (as done in MatrixMultiplicationSaad) would avoid repeated allocations -- verify before changing
SignedIndexVectorType marker(ncols);
for (int i = 0; i < static_cast<int>(ncols); ++i)
marker[i] = -1;
// Initialize
IndexType new_A_cols = 0;
// Iterate over A
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
marker[ca] = 1;
++new_A_cols;
}
// Iterate over B
const IndexType row_begin_b = index1_b[ia];
const IndexType row_end_b = index1_b[ia+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
// Only count the columns of B not already present in A
if (marker[cb] < 0) {
marker[cb] = 1;
++new_A_cols;
}
}
new_a_ptr[ia + 1] = new_A_cols;
}
}
// We initialize the sparse matrix: turn the per-row counts into CSR row offsets
std::partial_sum(new_a_ptr, new_a_ptr + nrows + 1, new_a_ptr);
const SizeType nonzero_values = new_a_ptr[nrows];
IndexType* aux_index2_new_a = new IndexType[nonzero_values];
ValueType* aux_val_new_a = new ValueType[nonzero_values];
// Numeric pass: copy the entries of A, then add (or append) the scaled entries of B
#pragma omp parallel
{
#pragma omp for
for(int ia = 0; ia < static_cast<int>(nrows); ++ia) {
// marker[col] now stores the position of that column inside the current output row
SignedIndexVectorType marker(ncols);
for (int i = 0; i < static_cast<int>(ncols); ++i)
marker[i] = -1;
// Initialize
const IndexType row_beg = new_a_ptr[ia];
IndexType row_end = row_beg;
// Iterate over A
const IndexType row_begin_a = index1_a[ia];
const IndexType row_end_a = index1_a[ia+1];
for(IndexType ja = row_begin_a; ja < row_end_a; ++ja) {
const IndexType ca = index2_a[ja];
const ValueType va = values_a[ja];
marker[ca] = row_end;
aux_index2_new_a[row_end] = ca;
aux_val_new_a[row_end] = va;
++row_end;
}
// Iterate over B
const IndexType row_begin_b = index1_b[ia];
const IndexType row_end_b = index1_b[ia+1];
for(IndexType jb = row_begin_b; jb < row_end_b; ++jb) {
const IndexType cb = index2_b[jb];
const ValueType vb = values_b[jb];
if (marker[cb] < 0) {
// Column only present in B: append a new entry
marker[cb] = row_end;
aux_index2_new_a[row_end] = cb;
aux_val_new_a[row_end] = Factor * vb;
++row_end;
} else {
// Column already present in A: accumulate in place
aux_val_new_a[marker[cb]] += Factor * vb;
}
}
}
}
// We reorder the rows (the appended B columns break the ordering)
SortRows(new_a_ptr, nrows, ncols, aux_index2_new_a, aux_val_new_a);
// We fill the matrix
CreateSolutionMatrix(A, nrows, ncols, new_a_ptr, aux_index2_new_a, aux_val_new_a);
// Release memory
delete[] new_a_ptr;
delete[] aux_index2_new_a;
delete[] aux_val_new_a;
}
/**
 * @brief This method computes the transpose of a given matrix, scaled by a factor: rA = Factor * rB^T
 * @param rA The resulting matrix
 * @param rB The matrix to transpose
 * @param Factor The scalar coefficient applied to the transposed values
 */
template <class AMatrix, class BMatrix>
static void TransposeMatrix(
AMatrix& rA,
const BMatrix& rB,
const double Factor = 1.0
)
{
typedef typename value_type<AMatrix>::type ValueType;
// Get access to B data (raw CSR arrays)
const IndexType* index1 = rB.index1_data().begin();
const IndexType* index2 = rB.index2_data().begin();
const ValueType* data = rB.value_data().begin();
// Number of stored entries of B (equals the non-zeros of the transpose)
const SizeType transpose_nonzero_values = rB.value_data().end() - rB.value_data().begin();
const SizeType size_system_1 = rB.size1();
const SizeType size_system_2 = rB.size2();
if (rA.size1() != size_system_2 || rA.size2() != size_system_1 ) {
rA.resize(size_system_2, size_system_1, false);
}
// Row pointer of the transpose: per-row counts first, CSR offsets after the partial_sum below
IndexVectorType new_a_ptr(size_system_2 + 1);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(size_system_2 + 1); ++i)
new_a_ptr[i] = 0;
IndexVectorType aux_index2_new_a(transpose_nonzero_values);
DenseVector<ValueType> aux_val_new_a(transpose_nonzero_values);
// Counting pass: one increment per entry of B; atomic because several rows of B can hit the same column concurrently
#pragma omp parallel for
for (int i=0; i<static_cast<int>(size_system_1); ++i) {
IndexType row_begin = index1[i];
IndexType row_end = index1[i+1];
for (IndexType j=row_begin; j<row_end; j++) {
#pragma omp atomic
new_a_ptr[index2[j] + 1] += 1;
}
}
// We initialize the blocks sparse matrix (turn the counts into offsets)
std::partial_sum(new_a_ptr.begin(), new_a_ptr.end(), &new_a_ptr[0]);
// aux_indexes[col] = number of entries already scattered into row 'col' of the transpose
IndexVectorType aux_indexes(size_system_2);
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(size_system_2); ++i)
aux_indexes[i] = 0;
// Scatter pass: kept sequential on purpose -- the read of aux_indexes followed by the writes it indexes would race if the pragma below were re-enabled as-is
// #pragma omp parallel for
for (int i=0; i<static_cast<int>(size_system_1); ++i) {
IndexType row_begin = index1[i];
IndexType row_end = index1[i+1];
for (IndexType j=row_begin; j<row_end; j++) {
const IndexType current_row = index2[j];
const IndexType initial_position = new_a_ptr[current_row];
const IndexType current_index = initial_position + aux_indexes[current_row];
aux_index2_new_a[current_index] = i;
aux_val_new_a[current_index] = Factor * data[j];
// #pragma omp atomic
aux_indexes[current_row] += 1;
}
}
// We reorder the rows
SortRows(&new_a_ptr[0], size_system_2, size_system_1, &aux_index2_new_a[0], &aux_val_new_a[0]);
// We fill the matrix
CreateSolutionMatrix(rA, size_system_2, size_system_1, &new_a_ptr[0], &aux_index2_new_a[0], &aux_val_new_a[0]);
}
/**
 * @brief This method is designed to create the final solution sparse matrix from the auxiliar values
 * @param C The matrix solution
 * @param NRows The number of rows of the matrix
 * @param NCols The number of columns of the matrix
 * @param CPtr The indexes that indicate the number of nonzero values in each row
 * @param AuxIndex2C The indexes of the nonzero columns
 * @param AuxValC The C array containing the values of the sparse matrix
 */
template <class CMatrix, typename TSize, typename Ptr, typename IndexType, typename ValueType>
static inline void CreateSolutionMatrix(
CMatrix& C,
const TSize NRows,
const TSize NCols,
const Ptr* CPtr,
const IndexType* AuxIndex2C,
const ValueType* AuxValC
)
{
// Nothing to do for an empty matrix
if ((NRows == 0) || (NCols == 0))
return void();
// Total number of non-zero entries of the final matrix
const TSize nonzero_values = CPtr[NRows];
// Allocate the matrix with the exact amount of storage
C = CMatrix(NRows, NCols, nonzero_values);
IndexType* row_ptr_c = C.index1_data().begin();
IndexType* col_index_c = C.index2_data().begin();
double* entries_c = C.value_data().begin();
// Rebuild the row pointer array from the offsets stored in CPtr
row_ptr_c[0] = 0;
for (TSize i_row = 0; i_row < NRows; ++i_row)
row_ptr_c[i_row + 1] = row_ptr_c[i_row] + (CPtr[i_row + 1] - CPtr[i_row]);
// Copy the column indices and the values in parallel
#pragma omp parallel for
for (int i_nz = 0; i_nz < static_cast<int>(nonzero_values); ++i_nz) {
KRATOS_DEBUG_ERROR_IF(AuxIndex2C[i_nz] > static_cast<IndexType>(NCols)) << "Index " << AuxIndex2C[i_nz] <<" is greater than the number of columns " << NCols << std::endl;
col_index_c[i_nz] = AuxIndex2C[i_nz];
entries_c[i_nz] = AuxValC[i_nz];
}
// Mark the storage as completely filled
C.set_filled(NRows+1, nonzero_values);
}
/**
 * @brief This method is designed to reorder the rows by columns (insertion sort of every row)
 * @param CPtr The offsets that delimit the nonzero values of each row
 * @param NRows The number of rows of the matrix
 * @param NCols The number of columns of the matrix
 * @param Columns The columns of the problem
 * @param Values The values (to be ordered with the rows)
 */
template <typename TSize, typename Col, typename TIndexType, typename ValueType>
static inline void SortRows(
const TIndexType* CPtr,
const TSize NRows,
const TSize NCols,
Col* Columns,
ValueType* Values
)
{
#pragma omp parallel
{
#pragma omp for
for (int i_row=0; i_row<static_cast<int>(NRows); i_row++) {
const TIndexType row_beg = CPtr[i_row];
const TIndexType row_end = CPtr[i_row + 1];
// Insertion sort of [row_beg, row_end) by column index, moving the values along with the columns
for(IndexType j = 1; j < row_end - row_beg; ++j) {
// Use the template types Col/ValueType here: the previous hard-coded IndexType/double silently converted non-double value types
const Col c = Columns[j + row_beg];
const ValueType v = Values[j + row_beg];
SignedIndexType i = j - 1;
while(i >= 0 && Columns[i + row_beg] > c) {
KRATOS_DEBUG_ERROR_IF(Columns[i + row_beg] > static_cast<Col>(NCols)) << " Index for column: " << i + row_beg << ". Index " << Columns[i + row_beg] <<" is greater than the number of columns " << NCols << std::endl;
Columns[i + 1 + row_beg] = Columns[i + row_beg];
Values[i + 1 + row_beg] = Values[i + row_beg];
i--;
}
Columns[i + 1 + row_beg] = c;
Values[i + 1 + row_beg] = v;
}
}
}
}
/**
 * @brief This method assembles several sparse matrices into one large sparse matrix
 * @param rMatrix The resulting assembled matrix
 * @param rMatricespBlocks The pointers to the matrices we are interested in assemble
 * @param ContributionCoefficients The matrix containing the coefficients to be considered (copy, so we don't need to provide it)
 * @param TransposeBlocks The matrix containing the flags telling us to transpose the blocks (copy, so we don't need to provide it)
 */
static inline void AssembleSparseMatrixByBlocks(
CompressedMatrix& rMatrix,
const DenseMatrix<CompressedMatrix*>& rMatricespBlocks,
DenseMatrix<double> ContributionCoefficients = DenseMatrix<double>(0,0),
DenseMatrix<bool> TransposeBlocks = DenseMatrix<bool>(0,0)
)
{
const SizeType number_of_rows_blocks = rMatricespBlocks.size1();
const SizeType number_of_columns_blocks = rMatricespBlocks.size2();
// Fill the auxiliary matrices with defaults if they were not provided
if (ContributionCoefficients.size1() == 0 && ContributionCoefficients.size2() == 0) {
ContributionCoefficients.resize(number_of_rows_blocks, number_of_columns_blocks);
for (IndexType i = 0; i < number_of_rows_blocks; ++i) {
for (IndexType j = 0; j < number_of_columns_blocks; ++j) {
ContributionCoefficients(i, j) = 1.0;
}
}
} else {
KRATOS_ERROR_IF(ContributionCoefficients.size1() != number_of_rows_blocks || ContributionCoefficients.size2() != number_of_columns_blocks) << "The ContributionCoefficients dimensions" << ContributionCoefficients.size1() << " and " << ContributionCoefficients.size2() << "do not coincide with the dimensions of rMatricespBlocks" << number_of_rows_blocks << "and " << number_of_columns_blocks << std::endl;
}
if (TransposeBlocks.size1() == 0 && TransposeBlocks.size2() == 0) {
TransposeBlocks.resize(number_of_rows_blocks, number_of_columns_blocks);
for (IndexType i = 0; i < number_of_rows_blocks; ++i) {
// BUGFIX: iterate over the columns of the block layout (was number_of_rows_blocks, which left entries uninitialized for non-square layouts)
for (IndexType j = 0; j < number_of_columns_blocks; ++j) {
TransposeBlocks(i, j) = false;
}
}
} else {
KRATOS_ERROR_IF(TransposeBlocks.size1() != number_of_rows_blocks || TransposeBlocks.size2() != number_of_columns_blocks) << "The TransposeBlocks dimensions" << TransposeBlocks.size1() << " and " << TransposeBlocks.size2() << "do not coincide with the dimensions of rMatricespBlocks" << number_of_rows_blocks << "and " << number_of_columns_blocks << std::endl;
}
// Compute total size and check consistency of the different blocks
SizeType nrows = 0, ncols = 0;
std::vector<SizeType> row_sizes(number_of_rows_blocks);
std::vector<SizeType> column_sizes(number_of_columns_blocks);
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
if (TransposeBlocks(i, 0)) {
row_sizes[i] = (*rMatricespBlocks(i, 0)).size2();
} else {
row_sizes[i] = (*rMatricespBlocks(i, 0)).size1();
}
nrows += row_sizes[i];
}
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
if (TransposeBlocks(0, j)) {
column_sizes[j] = (*rMatricespBlocks(0, j)).size1();
} else {
column_sizes[j] = (*rMatricespBlocks(0, j)).size2();
}
ncols += column_sizes[j];
}
// Check consistency of all blocks
// BUGFIX: the expected column size streamed in the messages is column_sizes[j] (row_sizes[j] read out of bounds for non-square block layouts)
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
if (TransposeBlocks(i, j)) {
KRATOS_ERROR_IF((*rMatricespBlocks(i, j)).size2() != row_sizes[i] || (*rMatricespBlocks(i, j)).size1() != column_sizes[j]) << " Not consistent size in block " << i << ", " << j << ".\t" << (*rMatricespBlocks(i, j)).size2() << ", " << (*rMatricespBlocks(i, j)).size1() << " vs " << row_sizes[i] << ", " << column_sizes[j] << std::endl;
} else {
KRATOS_ERROR_IF((*rMatricespBlocks(i, j)).size1() != row_sizes[i] || (*rMatricespBlocks(i, j)).size2() != column_sizes[j]) << " Not consistent size in block " << i << ", " << j << ".\t" << (*rMatricespBlocks(i, j)).size1() << ", " << (*rMatricespBlocks(i, j)).size2() << " vs " << row_sizes[i] << ", " << column_sizes[j] << std::endl;
}
}
}
// Exiting just in case of empty matrix
if ((nrows == 0) || (ncols == 0))
return void();
// We will compute nonzero terms: matrix_ptr holds per-row counts first, CSR offsets after the partial_sum below
IndexType* matrix_ptr = new IndexType[nrows + 1];
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(nrows + 1); ++i)
matrix_ptr[i] = 0;
#ifdef KRATOS_DEBUG
IndexType check_non_zero = 0;
DenseMatrix<IndexType> check_non_zero_blocks(number_of_rows_blocks, number_of_columns_blocks);
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
check_non_zero_blocks(i, j) = 0;
}
}
#endif
// Counting pass: number of non-zeros of every row of the assembled matrix
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int k=0; k<static_cast<int>(row_sizes[i]); ++k) {
IndexType matrix_cols_aux = 0;
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
#ifdef KRATOS_DEBUG
IndexType partial_matrix_cols_aux = 0;
#endif
// Skip if empty matrix
CompressedMatrix& r_matrix = *rMatricespBlocks(i, j);
if (r_matrix.nnz() > 0) {
if (TransposeBlocks(i, j)) {
// We compute the transposed matrix
const SizeType size_system_1 = r_matrix.size1();
const SizeType size_system_2 = r_matrix.size2();
CompressedMatrix transpose(size_system_2, size_system_1);
TransposeMatrix<CompressedMatrix, CompressedMatrix>(transpose, r_matrix);
ComputeNonZeroBlocks(transpose, k, matrix_cols_aux);
#ifdef KRATOS_DEBUG
ComputeNonZeroBlocks(transpose, k, partial_matrix_cols_aux);
#endif
} else {
ComputeNonZeroBlocks(r_matrix, k, matrix_cols_aux);
#ifdef KRATOS_DEBUG
ComputeNonZeroBlocks(r_matrix, k, partial_matrix_cols_aux);
#endif
}
}
#ifdef KRATOS_DEBUG
check_non_zero_blocks(i, j) += partial_matrix_cols_aux;
#endif
}
// Global row index = heights of the block-rows above + local row k (accumulate with a SizeType init to avoid the int truncation of a plain 0 on very large systems)
IndexType& r_matrix_ptr_value = matrix_ptr[std::accumulate(row_sizes.begin(), row_sizes.begin() + i, SizeType(0)) + k + 1];
#pragma omp atomic
r_matrix_ptr_value += matrix_cols_aux;
#ifdef KRATOS_DEBUG
#pragma omp atomic
check_non_zero += matrix_cols_aux;
#endif
}
}
}
// Auxiliar values
std::partial_sum(matrix_ptr, matrix_ptr + nrows + 1, matrix_ptr);
const SizeType nonzero_values = matrix_ptr[nrows];
#ifdef KRATOS_DEBUG
SizeType total_nnz = 0;
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
const SizeType block_nnz = rMatricespBlocks(i, j)->nnz();
KRATOS_ERROR_IF_NOT(check_non_zero_blocks(i, j) == block_nnz) << "Inconsistent number of non-zero values. Check 0: " << block_nnz << " vs " << check_non_zero_blocks(i, j) << ". Block: " << i << ", " << j << std::endl;
total_nnz += block_nnz;
}
}
KRATOS_ERROR_IF_NOT(check_non_zero == total_nnz) << "Inconsistent number of non-zero values. Check 1: " << total_nnz << " vs " << check_non_zero << std::endl;
KRATOS_ERROR_IF_NOT(nonzero_values == total_nnz) << "Inconsistent number of non-zero values. Check 2: " << total_nnz << " vs " << nonzero_values << std::endl;
#endif
// Initialize matrix with the corresponding non-zero values
rMatrix = CompressedMatrix(nrows, ncols, nonzero_values);
// Fill the new matrix
double* Matrix_values = rMatrix.value_data().begin();
IndexType* Matrix_index1 = rMatrix.index1_data().begin();
IndexType* Matrix_index2 = rMatrix.index2_data().begin();
Matrix_index1[0] = 0;
for (IndexType i = 0; i < nrows; ++i)
Matrix_index1[i+1] = Matrix_index1[i] + (matrix_ptr[i + 1] - matrix_ptr[i]);
// Filling pass: copy (and scale) every block row into its place inside the assembled matrix
#pragma omp parallel
{
#pragma omp for
for (int i=0; i<static_cast<int>(number_of_rows_blocks); ++i) {
for (int k=0; k<static_cast<int>(row_sizes[i]); ++k) {
const IndexType row_beg = matrix_ptr[std::accumulate(row_sizes.begin(), row_sizes.begin() + i, SizeType(0)) + k];
IndexType row_end = row_beg;
for (int j=0; j<static_cast<int>(number_of_columns_blocks); ++j) {
const SizeType initial_index_column = std::accumulate(column_sizes.begin(), column_sizes.begin() + j, SizeType(0));
// Skip if empty matrix
CompressedMatrix& r_matrix = *rMatricespBlocks(i, j);
if (r_matrix.nnz() > 0) {
if (TransposeBlocks(i, j)) {
// We compute the transposed matrix
const SizeType size_system_1 = r_matrix.size1();
const SizeType size_system_2 = r_matrix.size2();
CompressedMatrix transpose(size_system_2, size_system_1);
TransposeMatrix<CompressedMatrix, CompressedMatrix>(transpose, r_matrix);
ComputeAuxiliarValuesBlocks(transpose, Matrix_index2, Matrix_values, k, row_end, initial_index_column, ContributionCoefficients(i, j));
} else {
ComputeAuxiliarValuesBlocks(r_matrix, Matrix_index2, Matrix_values, k, row_end, initial_index_column, ContributionCoefficients(i, j));
}
}
}
}
}
}
// Close the matrix
rMatrix.set_filled(nrows+1, nonzero_values);
// Release memory
delete[] matrix_ptr;
}
/**
* @brief This is a method to check the block containing nonzero values
* @param rMatrix The auxiliar block
* @param CurrentRow The current row computed
* @param rNonZeroColsAux2 The nonzero rows array
*/
static inline void ComputeNonZeroBlocks(
const CompressedMatrix& rMatrix,
const int CurrentRow,
IndexType& rNonZeroColsAux2
)
{
// Get access to aux_K data
const IndexType* aux_matrix_index1 = rMatrix.index1_data().begin();
const IndexType row_begin = aux_matrix_index1[CurrentRow];
const IndexType row_end = aux_matrix_index1[CurrentRow + 1];
for (IndexType j=row_begin; j<row_end; j++) {
++rNonZeroColsAux2;
}
}
/**
* @brief This is a method to compute the contribution of the auxiliar blocks
* @param AuxK The auxiliar block
* @param AuxIndex2 The indexes of the non zero columns
* @param AuxVals The values of the final matrix
* @param CurrentRow The current row computed
* @param RowEnd The last column computed
* @param InitialIndexColumn The initial column index of the auxiliar block in the final matrix
*/
static inline void ComputeAuxiliarValuesBlocks(
const CompressedMatrix& rMatrix,
IndexType* AuxIndex2,
double* AuxVals,
const int CurrentRow,
IndexType& RowEnd,
const SizeType InitialIndexColumn,
const double ContributionCoefficient = 1.0
)
{
// Get access to aux_K data
const double* aux_values = rMatrix.value_data().begin();
const IndexType* aux_Matrix_index1 = rMatrix.index1_data().begin();
const IndexType* aux_Matrix_index2 = rMatrix.index2_data().begin();
const IndexType aux_Matrix_row_begin = aux_Matrix_index1[CurrentRow];
const IndexType aux_Matrix_row_end = aux_Matrix_index1[CurrentRow + 1];
for (IndexType j=aux_Matrix_row_begin; j<aux_Matrix_row_end; j++) {
const IndexType col_index = InitialIndexColumn + aux_Matrix_index2[j];
AuxIndex2[RowEnd] = col_index;
AuxVals[RowEnd] = ContributionCoefficient * aux_values[j];
++RowEnd;
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const
{
return "SparseMatrixMultiplicationUtility";
}
/// Print information about this object.
void PrintInfo (std::ostream& rOStream) const
{
rOStream << "SparseMatrixMultiplicationUtility";
}
/// Print object's data.
void PrintData (std::ostream& rOStream) const
{
// Nothing to print: the utility is stateless
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief This method is oriented to merge rows
 * @details Merges two sorted index sequences into their sorted union. When TNeedOut is true the union is written to Column3; otherwise only the end position (and hence the union size) is computed.
 * @param Column1 The index of the first matrix column
 * @param Column1End The last index of the first matrix column
 * @param Column2 The index of the second matrix column
 * @param Column2End The last index of the second matrix column
 * @param Column3 The index of the third matrix column
 * @return One-past-the-end of the merged output
 */
template <bool TNeedOut, class TIndex>
static TIndex* MergeRows(
const TIndex* Column1,
const TIndex* Column1End,
const TIndex* Column2,
const TIndex* Column2End,
TIndex* Column3
)
{
// Classic two-pointer merge, emitting columns common to both inputs only once
while(Column1 != Column1End && Column2 != Column2End) {
const TIndex first_value = *Column1;
const TIndex second_value = *Column2;
if (first_value == second_value) {
if (TNeedOut) *Column3 = first_value;
++Column1;
++Column2;
} else if (first_value < second_value) {
if (TNeedOut) *Column3 = first_value;
++Column1;
} else {
if (TNeedOut) *Column3 = second_value;
++Column2;
}
++Column3;
}
// One input is exhausted: the remainder of the other belongs to the union unchanged
if (!TNeedOut) {
return Column3 + (Column1End - Column1) + (Column2End - Column2);
}
if (Column1 != Column1End) {
return std::copy(Column1, Column1End, Column3);
}
return std::copy(Column2, Column2End, Column3);
}
/**
 * @brief This method is oriented to merge rows
 * @details Computes the sorted union of two sorted rows while linearly combining their values: each output entry holds rAlpha1*Value1 + rAlpha2*Value2 (with the missing term dropped where a column appears in only one input).
 * @param rAlpha1 The coefficient of the first matrix
 * @param Column1 The index of the first matrix column
 * @param Column1End The last index of the first matrix column
 * @param Value1 The values of the first matrix
 * @param rAlpha2 The coefficient of the second matrix
 * @param Column2 The index of the second matrix column
 * @param Column2End The last index of the second matrix column
 * @param Value2 The values of the second matrix
 * @param Column3 The index of the third matrix column
 * @param Value3 The values of the third matrix
 * @return One-past-the-end of the merged output columns
 */
template <class TIndex, class TValueType>
static TIndex* MergeRows(
const TValueType &rAlpha1,
const TIndex* Column1,
const TIndex* Column1End,
const TValueType *Value1,
const TValueType &rAlpha2,
const TIndex* Column2,
const TIndex* Column2End,
const TValueType *Value2,
TIndex* Column3,
TValueType *Value3
)
{
// Two-pointer merge: always advance the input(s) holding the smallest column
while(Column1 != Column1End && Column2 != Column2End) {
const TIndex first_col = *Column1;
const TIndex second_col = *Column2;
if (first_col == second_col) {
*Column3 = first_col;
*Value3 = rAlpha1 * (*Value1++) + rAlpha2 * (*Value2++);
++Column1;
++Column2;
} else if (first_col < second_col) {
*Column3 = first_col;
*Value3 = rAlpha1 * (*Value1++);
++Column1;
} else {
*Column3 = second_col;
*Value3 = rAlpha2 * (*Value2++);
++Column2;
}
++Column3;
++Value3;
}
// Flush whichever input still has entries
for (; Column1 != Column1End; ++Column1) {
*Column3++ = *Column1;
*Value3++ = rAlpha1 * (*Value1++);
}
for (; Column2 != Column2End; ++Column2) {
*Column3++ = *Column2;
*Value3++ = rAlpha2 * (*Value2++);
}
return Column3;
}
/**
 * @brief This method computes the width (number of distinct column indices) of the product row built from the rows of B selected by one row of A
 * @param AColumn The first column index of the current row of A
 * @param AColumnEnd One-past-the-last column index of the current row of A
 * @param BPtr The row pointer array of the second matrix
 * @param BColumn The column index array of the second matrix
 * @param Tmp1Column First scratch buffer (at least max_row_width entries)
 * @param Tmp2Column Second scratch buffer
 * @param Tmp3Column Third scratch buffer
 * @return The resulting row width
 */
template <class TIndex>
static TIndex ProdRowWidth(
const TIndex* AColumn,
const TIndex* AColumnEnd,
const TIndex* BPtr,
const TIndex* BColumn,
TIndex* Tmp1Column,
TIndex* Tmp2Column,
TIndex* Tmp3Column
)
{
const TIndex nrow = AColumnEnd - AColumn;
/* No rows to merge, nothing to do */
if (nrow == 0) return 0;
/* Single row, just copy it to output */
if (nrow == 1) return BPtr[*AColumn + 1] - BPtr[*AColumn];
/* Two rows, merge them */
if (nrow == 2) {
// BUGFIX: use TIndex (was int) so 64-bit column indices are not truncated
const TIndex a1 = AColumn[0];
const TIndex a2 = AColumn[1];
return MergeRows<false>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column) - Tmp1Column;
}
/* Generic case (more than two rows).
*
* Merge rows by pairs, then merge the results together.
* When merging two rows, the result is always wider (or equal).
* Merging by pairs allows to work with short rows as often as possible.
*/
// Merge first two.
TIndex a1 = *AColumn++;
TIndex a2 = *AColumn++;
TIndex c_col1 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp1Column ) - Tmp1Column;
// Go by pairs.
while(AColumn + 1 < AColumnEnd) {
a1 = *AColumn++;
a2 = *AColumn++;
// Merge the next pair of B rows into the second scratch buffer
TIndex c_col2 = MergeRows<true>( BColumn + BPtr[a1], BColumn + BPtr[a1+1], BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column;
if (AColumn == AColumnEnd) {
// Last pair: only the width of the final merge is needed
return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column;
} else {
c_col1 = MergeRows<true>( Tmp1Column, Tmp1Column + c_col1, Tmp2Column, Tmp2Column + c_col2, Tmp3Column ) - Tmp3Column;
std::swap(Tmp1Column, Tmp3Column);
}
}
// Merge the tail (odd number of rows: a single B row remains)
a2 = *AColumn;
return MergeRows<false>( Tmp1Column, Tmp1Column + c_col1, BColumn + BPtr[a2], BColumn + BPtr[a2+1], Tmp2Column ) - Tmp2Column;
}
/**
* @brief This method is oriented to multiply rows
* @param AColumn The index of the first matrix column
* @param AColumnEnd The last index of the first matrix column
* @param AValue The values of the first matrix
* @param BPtr The array constining the nonzero values per row of the second matrix
* @param BColumn The index of the second matrix column
* @param BValue The values of the second matrix
* @param OutColumn Indexes of the columns of output matrix
* @param OutValue Values of the columns of output matrix
* @param Tmp2Column Indexes of the columns of second matrix
* @param Tmp2Value Values of the columns of second matrix
* @param Tmp3Column Indexes of the columns of third matrix
* @param Tmp3Value Values of the columns of third matrix
* @return The resulting row
*/
template <class TIndex, class TValueType>
static void ProdRow(
const TIndex* AColumn,
const TIndex* AColumnEnd,
const TValueType *AValue,
const TIndex* BPtr,
const TIndex* BColumn,
const TValueType *BValue,
TIndex* OutColumn,
TValueType *OutValue,
TIndex* Tmp2Column,
TValueType *Tmp2Value,
TIndex* Tmp3Column,
TValueType *Tmp3Value
)
{
const TIndex nrow = AColumnEnd - AColumn;
/* No rows to merge, nothing to do */
if (nrow == 0) return;
/* Single row, just copy it to output */
if (nrow == 1) {
TIndex ac = *AColumn;
TValueType av = *AValue;
const TValueType *bv = BValue + BPtr[ac];
const TIndex* bc = BColumn + BPtr[ac];
const TIndex* be = BColumn + BPtr[ac+1];
while(bc != be) {
*OutColumn++ = *bc++;
*OutValue++ = av * (*bv++);
}
return;
}
/* Two rows, merge them */
if (nrow == 2) {
TIndex ac1 = AColumn[0];
TIndex ac2 = AColumn[1];
TValueType av1 = AValue[0];
TValueType av2 = AValue[1];
MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], OutColumn, OutValue );
}
/* Generic case (more than two rows).
*
* Merge rows by pairs, then merge the results together.
* When merging two rows, the result is always wider (or equal).
* Merging by pairs allows to work with short rows as often as possible.
*/
// Merge first two.
TIndex ac1 = *AColumn++;
TIndex ac2 = *AColumn++;
TValueType av1 = *AValue++;
TValueType av2 = *AValue++;
TIndex* tm1_col = OutColumn;
TValueType *tm1_val = OutValue;
TIndex c_col1 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], tm1_col, tm1_val ) - tm1_col;
// Go by pairs.
while(AColumn + 1 < AColumnEnd) {
ac1 = *AColumn++;
ac2 = *AColumn++;
av1 = *AValue++;
av2 = *AValue++;
TIndex c_col2 = MergeRows( av1, BColumn + BPtr[ac1], BColumn + BPtr[ac1+1], BValue + BPtr[ac1], av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp2Column, Tmp2Value ) - Tmp2Column;
c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, amgcl::math::identity<TValueType>(), Tmp2Column, Tmp2Column + c_col2, Tmp2Value, Tmp3Column, Tmp3Value ) - Tmp3Column;
std::swap(Tmp3Column, tm1_col);
std::swap(Tmp3Value, tm1_val);
}
// Merge the tail if there is one.
if (AColumn < AColumnEnd) {
ac2 = *AColumn++;
av2 = *AValue++;
c_col1 = MergeRows( amgcl::math::identity<TValueType>(), tm1_col, tm1_col + c_col1, tm1_val, av2, BColumn + BPtr[ac2], BColumn + BPtr[ac2+1], BValue + BPtr[ac2], Tmp3Column, Tmp3Value ) - Tmp3Column;
std::swap(Tmp3Column, tm1_col);
std::swap(Tmp3Value, tm1_val);
}
// If we are lucky, tm1 now points to out.
// Otherwise, copy the results.
if (tm1_col != OutColumn) {
std::copy(tm1_col, tm1_col + c_col1, OutColumn);
std::copy(tm1_val, tm1_val + c_col1, OutValue);
}
return;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class SparseMatrixMultiplicationUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
// /****************************** INPUT STREAM FUNCTION ******************************/
// /***********************************************************************************/
//
// template<class TPointType, class TPointerType>
// inline std::istream& operator >> (std::istream& rIStream,
// SparseMatrixMultiplicationUtility& rThis);
//
// /***************************** OUTPUT STREAM FUNCTION ******************************/
// /***********************************************************************************/
//
// template<class TPointType, class TPointerType>
// inline std::ostream& operator << (std::ostream& rOStream,
// const SparseMatrixMultiplicationUtility& rThis)
// {
// return rOStream;
// }
///@}
} // namespace Kratos.
#endif // KRATOS_TREE_CONTACT_SEARCH_H_INCLUDED defined
|
BaseTensor.h | /* BaseTensor
* This library is distributed WITHOUT ANY WARRANTY.
* Author: Zhao Jize <zhaojize@outlook.com>
* All copyright reserved by the author.
*/
/* 1, 5, 9, 13, 17, 21*/
/*v2.0.1*/
#if !defined __BASE_TENSOR_H
#define __BASE_TENSOR_H
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fstream>
#include <iostream>
#include <initializer_list>
#include <string.h>
#include <tuple>
#include <assert.h>
#include <omp.h>
#include "globalconfig.h"
#include "cppblasex.h"
#include "cppblas.h"
#include "cpplapack.h"
#include "stringname.h"
#include "LLMOperation.h"
#include "TypeCompatible.h"
#include "TensorStruct.h"
#include "miolapack.h"
template<typename T> class Vector;
template<typename DTTYPE>
class BaseTensor : public TensorStruct
{
    // Real scalar type associated with DTTYPE (used e.g. for singular values)
    using RDTTYPE = typename istc::SVDReal<DTTYPE>::type;
public:
    long fileSize; // the size of tensor in disk, in unit of byte
    DTTYPE* tensorData; // flat storage of the tensor elements
protected:
    BLASINT maxDataSize; // allocated capacity of tensorData (>= dataSize)
public:
    /********************constructor and destructor*********************/
    /*read from file; readtype selects stream / fread / mmap path, ifremoved deletes the file after reading*/
    BaseTensor(const char* filename, const bool &ifremoved, const BLASINT &readtype = 0);
    /*read from stream*/
    BaseTensor(std::ifstream &ifs);
    /*initialize tensor from matrix*/
    template<typename T = DTTYPE> BaseTensor(const uint32_t &tr, const BLASINT* rd, const T* data = nullptr, const BLASINT &dtsize = 0);
    /*parameter changable constructor*/
    template<typename T = DTTYPE> BaseTensor(std::initializer_list<BLASINT> rd, const T* data = nullptr, const BLASINT &dtsize = 0);
    /*tensor contraction 1: T_...i... T^*_...i...*/
    BaseTensor(const BaseTensor &bt, const uint32_t &idx, void* tmpdata, const BLASINT &tdsize);
    /*tensor contraction 2 (btl(A) != btr(B)) :
      (S) Sequential contraction : A_ijk B_mjn = R_ikmn
      (D) Direct product like : A_ijqkl B_mnqst = R_ijmnklst
      (I) Insert right index into left : A_ijk B_mjn = R_imnk
    */
    BaseTensor(const char &cttype, const BaseTensor &btl, const uint32_t &il, const BaseTensor &btr, const uint32_t &ir, void* tmpdata, const BLASINT &tdsize);
    /*internal index contraction*/
    BaseTensor(const BaseTensor &bt, const uint32_t &il, const uint32_t &ir);
    /*direct product of two tensors with the same rank : T_{iq;jw;ke;lr;mt;...} = A_{ijklm...} B_{qwert...}, iq(jw, ke, lr, mt, ...) is denoted by one index in T*/
    BaseTensor(const BaseTensor &btl, const BaseTensor &btr, void* tmpdata, const BLASINT &tdsize);
    /*copy and move constructor; the (optional) scale factor is applied to the copied/stolen data*/
    BaseTensor(const BaseTensor &bt, const DTTYPE &scale = 1.0);
    BaseTensor(BaseTensor<DTTYPE> &&bt, const DTTYPE &scale = 1.0);
    virtual ~BaseTensor();
    /********************end of constructor and destructor********************/
    /**************************test the status of tensor*************************/
    inline BLASINT getMaxDataSize() const {return maxDataSize;}
    /**************************reset maxDataSize and reallocate tensorData*****************************/
    void reSetMaxDataSize(const BLASINT &maxsize);
    /***********************change the status of a tensor*******************/
    /**************tensorData can only be changed by the following functions if a tensor is contructed************/
    template<typename T = DTTYPE> typename std::enable_if<istc::isComplex<T>::value && std::is_same<T, DTTYPE>::value, bool>::type fill(const double &min, const double &max, const uint32_t &rdtype = 0);
    template<typename T = DTTYPE> typename std::enable_if<!istc::isComplex<T>::value && std::is_same<T, DTTYPE>::value, bool>::type fill(const double &min, const double &max, const uint32_t &rdtype = 0);
    void fill(const DTTYPE &el);
    template<typename T> bool fill(const T* data, const BLASINT &dtsize);
    /*resize the tensor, dataSize and maxDataSize can be changed.
     * data may be destroyed
     * */
    bool resize(std::initializer_list<BLASINT> rd);
    bool resize(const uint32_t &tr, const BLASINT* rd);
    bool truncateRankDim(const uint32_t &idx, const BLASINT &resdim); // resdim > 0
    /*********************end of the initializer****************************/
    inline DTTYPE &operator[](const BLASINT &i){return tensorData[i];} // no bounds check
    BaseTensor &operator = (const BaseTensor &bt);
    BaseTensor &operator *= (const DTTYPE &s);
    BaseTensor &operator /= (const DTTYPE &s);
    bool operator == (const BaseTensor &bt) noexcept;
    /*Operations on one leg. These operations should retain the structure of the tensor*/
    /*1. T_...i... * lambda[i], no summation over i */
    template<typename TLP> bool legProduct(const uint32_t &il, const TLP* lambda, const BLASINT &lmbdsize);
    /*lambda should be rank-1 tensor*/
    template<typename TLP> bool legProduct(const uint32_t &il, const Vector<TLP> &lambda);
    /*2. T_...i... / lambda[i], no summation over i*/
    template<typename TLP> bool legDivision(const uint32_t &il, const TLP* lambda, const BLASINT &lmbdsize);
    /*lambda should be rank-1 tensor*/
    template<typename TLP> bool legDivision(const uint32_t &il, const Vector<TLP> &lambda);
    /*change the position of the index*/
    /*shift index "il" before "iprev"*/
    bool shiftBefore(const uint32_t &il, const uint32_t &iprev, void* tmpdata, const BLASINT &tdsize);
    bool shiftBefore(const uint32_t &il, const uint32_t &iprev, BaseTensor<DTTYPE> &bt) const;
    /*shift index "il" after "iback"*/
    bool shiftAfter(const uint32_t &il, const uint32_t &iback, void* tmpdata, const BLASINT &tdsize);
    bool shiftAfter(const uint32_t &il, const uint32_t &iback, BaseTensor<DTTYPE> &bt) const;
    /*permute the index of the tensor*/
    bool permute(std::initializer_list<uint32_t> ri, void* tmpdata, const BLASINT &tdsize);
    /*move the leg to the leftmost and perform singular value decomposition, T = U\lambda V^T,
     * thistype => see rightSVD(...);
     * type = 'S' or 'A'
     */
    template<typename TSR, typename = typename std::enable_if<std::is_base_of<BaseTensor<DTTYPE>, TSR>::value>::type> BLASINT leftSVD(const char &thistype, const uint32_t &index, const char &Utype, Vector<RDTTYPE> &lambda, TSR &U, void* pool, const BLASINT &poolsize);
    auto leftSVD(const char &thistype, const uint32_t &index, const char &Utype, void* pool, const BLASINT &poolsize);
    /*move the leg to the rightmost and perform singular value decomposition, T = U\lambda V^T
     * thistype = 'K', 'O' or 'D' :
     * 'K' -> the data of tensor is kept as before;
     * 'O' -> the tensor is overlapped by U with the column min(M, N)
     * 'D' -> the data of tensor is destroyed.
     *
     * type = 'S' or 'A', see the manual of lapack
     */
    template<typename TSR, typename = typename std::enable_if<std::is_base_of<BaseTensor<DTTYPE>, TSR>::value>::type> BLASINT rightSVD(const char &thistype, const uint32_t &index, const char &VDtype, Vector<RDTTYPE> &lambda, TSR &VD, void* pool, const BLASINT &poolsize);
    auto rightSVD(const char &thistype, const uint32_t &index, const char &VDtype, void* pool, const BLASINT &poolsize);
    /*leftSVDD : SVD using gesdd. For more information, see the manual of lapack*/
    template<typename TSR, typename = typename std::enable_if<std::is_base_of<BaseTensor<DTTYPE>, TSR>::value>::type> BLASINT leftSVDD(const char &thistype, const uint32_t &index, const char &SVDtype, TSR &U, Vector<RDTTYPE> &lambda, TSR &VD, void* pool, const BLASINT &poolsize);
    /*rightSDD : SVD using gesdd*/
    template<typename TSR, typename = typename std::enable_if<std::is_base_of<BaseTensor<DTTYPE>, TSR>::value>::type> BLASINT rightSVDD(const char &thistype, const uint32_t &index, const char &SVDtype, TSR &U, Vector<RDTTYPE> &lambda, TSR &VD, void* pool, const BLASINT &poolsize);
    /*high order SVD, T_...i...j...k...l... = U_i\alpha U_j\beta U_k\gamma U_l\keppa S_...\alpha...\beta...\gamma...\keppa...
     * here all U are unitary matrix. After this decomposition, the core tensor is stored locally.
     */
    /*class 1: no truncation, Us are unitary matrix, core tensor is the same size as original tensor*/
    void highOrderLSVD(const uint32_t &num, const uint32_t* index, DTTYPE**U, const BLASINT* Usize, void* lambdamax, const BLASINT &lmbdsize, void* pool, const BLASINT &poolsize);
    void highOrderLSVD(const uint32_t &num, const uint32_t* index, BaseTensor**U, Vector<RDTTYPE>** lambda, void* pool, const BLASINT &poolsize);
    /*class 2: no trunction in U and lambda, core tensor are truncated according to dimtru*/
    void highOrderLSVD(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, DTTYPE** U, const BLASINT* Usize, void* lambdamax, const BLASINT &lmbdsize, void* pool, const BLASINT &poolsize);
    void highOrderLSVD(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, BaseTensor** U, Vector<RDTTYPE>** lambda, void* pool, const BLASINT &poolsize);
    /*class 3: use gesvdx for partial SVD, U is truncated and core tensor are truncated according to dimtru*/
    void highOrderLSVDx(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, DTTYPE** U, const BLASINT* Usize, void* lambdamax, const BLASINT &lmbdsize, const BLASINT* dimtrlbd, void* pool, const BLASINT &poolsize);
    void highOrderLSVDx(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, BaseTensor** U, Vector<RDTTYPE>** lambda, void* pool, const BLASINT &poolsize);
    /*normalize
     * ntype = 0 : v*v^\dagger = 1.0
     * ntype = 1 : v/abs(max(v))
     * ntype = ...
     */
    template<typename RDT, typename = typename std::enable_if<std::is_floating_point<RDT>::value>::type> auto normalize(const BLASINT &ntype, const RDT &nr = 1.0);
    template<typename RDT, typename = typename std::enable_if<std::is_floating_point<RDT>::value>::type> auto norm(const BLASINT &ntype);
    /*complex conjugate*/
    void cconj(void) noexcept;
    /*save Tensor to disk*/
    virtual bool saveTensorToFile(const char* filename) const; // binary
    virtual bool saveTensorToStream(std::ofstream &ofs) const;
    bool readTensorFromStream(std::ifstream &ifs);
    bool readTensorFromMemory(char* buffer);
    /*print tensorData to file*/
    bool printTensorToFile(const char* suffixname, const streamsize &width, const char &type = 'D'); // ascii, type ='A' or 'D'
    /*check the tensor*/
    void checkBaseTensor() const;
private:
    BaseTensor() = delete; // default constructor is forbidden!
    void setFileSize() noexcept;
    // Internal helpers backing the public shiftBefore/shiftAfter wrappers
    bool shiftBefore_(const uint32_t &il, const uint32_t &iprev, void* tmpdata, const BLASINT &tdsize);
    bool shiftBefore_(const uint32_t &il, const uint32_t &iprev, BaseTensor<DTTYPE> &bt) const;
    bool shiftAfter_(const uint32_t &il, const uint32_t &iback, void* tmpdata, const BLASINT &tdsize);
    bool shiftAfter_(const uint32_t &il, const uint32_t &iback, BaseTensor<DTTYPE> &bt) const;
public:
    template<typename T> friend void tensorContraction(const char &cttype, const BaseTensor<T> &btl, const uint32_t &il, const BaseTensor<T> &btr, const uint32_t &ir, BaseTensor<T> &bc, void* tmpdata, const BLASINT &tdsize);
};
/**
 * Read a tensor from disk.
 * readtype = 0 : read through an ifstream;
 * readtype = 1 : fread the whole file into a heap buffer;
 * otherwise    : mmap the file read-only.
 * If ifremoved is true the file is deleted after a successful read.
 * NOTE: errors are handled with assert() (file-wide convention) — these
 * checks disappear under NDEBUG.
 */
template <typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(const char* filename, const bool &ifremoved, const BLASINT &readtype)
{
    if (readtype == 0)
    {
        ifstream file(filename, ios::in);
        if (!file) assert(0);
        assert(readTensorFromStream(file));
        file.close();
    }
    else if (readtype == 1)
    {
        FILE* file = fopen(filename, "r");
        if (!file) assert(0);
        struct stat filestat;
        if (fstat(fileno(file), &filestat) < 0) assert(0);
        char* buffer = new(std::nothrow) char[filestat.st_size];
        assert(buffer);
        // Cast avoids the signed/unsigned comparison between size_t (fread) and off_t (st_size)
        if (fread(buffer, sizeof(char), filestat.st_size, file) != static_cast<size_t>(filestat.st_size)) assert(0);
        assert(readTensorFromMemory(buffer));
        delete []buffer;
        fclose(file);
    }
    else
    {
        int filedes = open(filename, O_RDONLY);
        if (filedes < 0) assert(0);
        struct stat filestat;
        if (fstat(filedes, &filestat) < 0) assert(0);
        char* buffer = (char*)mmap(0, filestat.st_size, PROT_READ, MAP_PRIVATE, filedes, 0);
        // BUGFIX: mmap signals failure with MAP_FAILED ((void*)-1), never nullptr,
        // so the previous "if (!buffer)" check could not detect a failed mapping
        if (buffer == MAP_FAILED) assert(0);
        if (madvise(buffer, filestat.st_size, MADV_WILLNEED | MADV_SEQUENTIAL) != 0)
        {
            munmap(buffer, filestat.st_size);
            close(filedes);
            assert(0);
        }
        assert(readTensorFromMemory(buffer));
        close(filedes);
        munmap(buffer, filestat.st_size);
    }
    if (ifremoved) remove(filename);
    // Capacity is pinned to the size that was just loaded
    maxDataSize = dataSize;
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*read from stream: deserializes structure and data, then pins capacity to the loaded size*/
template <typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(ifstream &ifs)
{
    // NOTE: the read itself happens inside assert() — compiled out with NDEBUG
    assert(readTensorFromStream(ifs));
    maxDataSize = dataSize;
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*initialize a tensor of rank tr with dimensions rd, optionally copying dtsize elements from data*/
template<typename DTTYPE> template<typename T>
BaseTensor<DTTYPE>::BaseTensor(const uint32_t &tr, const BLASINT* rd, const T* data, const BLASINT &dtsize) : TensorStruct(tr, rd)
{
    maxDataSize = dataSize;
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    if (dtsize >= dataSize)
    {
        // Element-wise assignment handles a source type T different from DTTYPE
        for (BLASINT k = 0; k < dataSize; k++) tensorData[k] = data[k];
    }
    else if (dtsize > 0)
    {
        std::cout << "Be careful that dtsize is smaller than dataSize!" << std::endl;
    }
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*initialize a tensor with the dimensions given in rd, optionally copying dtsize elements from data*/
template<typename DTTYPE> template<typename T>
BaseTensor<DTTYPE>::BaseTensor(std::initializer_list<BLASINT> rd, const T* data, const BLASINT &dtsize) : TensorStruct(rd)
{
    maxDataSize = dataSize;
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    if (dtsize >= dataSize)
    {
        // Element-wise assignment handles a source type T different from DTTYPE
        for (BLASINT k = 0; k < dataSize; k++) tensorData[k] = data[k];
    }
    else if (dtsize > 0)
    {
        std::cout << "Be careful that dtsize is smaller than dataSize!" << std::endl;
    }
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*tensor contraction 1: T_...i... T^*_...i... — contract index idx of bt with itself (conjugated)*/
/*NOTE(review): although bt is passed by const reference, the in-place transpose branches below
 *temporarily rearrange (and afterwards restore) bt's data buffer — not safe if bt is read concurrently*/
template<typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(const BaseTensor &bt, const uint32_t &idx, void* tmpdata, const BLASINT &tdsize) : TensorStruct(bt, idx)
{
    maxDataSize = dataSize;
    BLASINT dm = bt.rankDim[idx]; // dimension of the contracted index
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    if (idx == 0)
    {
        // Contracted index is the leftmost one: a single gemm suffices
        BLASINT dr = bt.dataSize/dm;
        gemm('C', 'N', dr, dr, dm, 1.0, bt.tensorData, dr, bt.tensorData, dr, 0.0, tensorData, dr);
        if (typeid(DTTYPE) != typeid(real(DTTYPE()))) // Complex scalar type: fix up the conjugation convention
        {
            for (BLASINT i = 0; i < dataSize; i++) tensorData[i] = conj(tensorData[i]);
        }
    }
    else if (idx+1 == bt.getTensorRank())
    {
        // Contracted index is the rightmost one
        BLASINT dl = bt.getDataSize()/dm;
        gemm('N', 'C', dl, dl, dm, 1.0, bt.tensorData, dm, bt.tensorData, dm, 0.0, tensorData, dl);
    }
    else
    {
        // Middle index: split dimensions into (dl, dm, dr) and move dm to the fastest position
        BLASINT dl = 1;
        for (uint32_t i = 0; i < idx; i++) dl *= bt.getRankDim(i);
        BLASINT dr = 1;
        for (uint32_t i = idx+1; i < bt.getTensorRank(); i++) dr *= bt.getRankDim(i);
        assert(dl*dm*dr == bt.dataSize);
        DTTYPE* localtmpdata = reinterpret_cast<DTTYPE*> (tmpdata);
        BLASINT dmr = dm*dr;
        if (tdsize >= bt.getDataSize()*sizeof(DTTYPE))
        {
            // Scratch large enough for a full out-of-place transpose: bt stays untouched
            for (BLASINT l = 0; l < dl; l++) llmopr::fastTransposeTo(dm, dr, bt.tensorData+l*dmr, dr, localtmpdata+l*dmr, dm);
            gemm('N', 'C', dl*dr, dl*dr, dm, 1.0, localtmpdata, dm, localtmpdata, dm, 0.0, tensorData, dl*dr);
        }
        else if (tdsize >= dmr*sizeof(DTTYPE))
        {
            // Scratch fits one (dm x dr) block: transpose bt in place, then restore it
            for (BLASINT l = 0; l < dl; l++) llmopr::transposeOnsite(dm, dr, bt.tensorData+l*dmr, localtmpdata);
            gemm('N', 'C', dl*dr, dl*dr, dm, 1.0, bt.tensorData, dm, bt.tensorData, dm, 0.0, tensorData, dl*dr);
            /*restore the data in bt*/
            for (BLASINT l = 0; l < dl; l++) llmopr::transposeOnsite(dr, dm, bt.tensorData+l*dmr, localtmpdata);
        }
        else
        {
            // No usable scratch: fall back to the slow scratch-free in-place transpose
            std::cout << "tmpdata is too small, it is " << tdsize << ", but it should be at least " << dm*dr*sizeof(DTTYPE) << std::endl;
            for (BLASINT l = 0; l < dl; l++) llmopr::transposeOnsite(dm, dr, bt.tensorData+l*dmr);
            gemm('N', 'C', dl*dr, dl*dr, dm, 1.0, bt.tensorData, dm, bt.tensorData, dm, 0.0, tensorData, dl*dr);
            /*restore the data in bt*/
            for (BLASINT l = 0; l < dl; l++) llmopr::transposeOnsite(dr, dm, bt.tensorData+l*dmr);
        }
    }
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/* tensor contraction : cttype = 'S', 'D', or 'I' — contract index il of btl with index ir of btr.
 * Each operand is viewed as a 3D block (Dll, Dlm, Dlr) resp. (Drl, Drm, Drr) around its contracted index.
 * NOTE(review): in the 'S' path the buffers of btl and btr may be transposed in place and restored
 * before returning, despite the const references — not safe under concurrent reads of the operands. */
template<typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(const char &cttype, const BaseTensor &btl, const uint32_t &il, const BaseTensor &btr, const uint32_t &ir, void* tmpdata, const BLASINT &tdsize) : TensorStruct(btl, il, btr, ir, cttype)
{
    assert(&btl != &btr);
#ifdef DEBUG_BASETENSOR_TIME_cbibitt
    std::cout<<std::endl<<"***start cbibitt :: " <<std::endl;
    clock_t time_begin = clock();
#endif
    maxDataSize = dataSize;
    /*left dim*/
    BLASINT Dll = 1;
    for (uint32_t i = 0; i < il; i++) Dll *= btl.getRankDim(i);
    BLASINT Dlm = btl.getRankDim(il);
    BLASINT Dlr = (btl.getDataSize()/Dll)/Dlm;
    /*right dim*/
    BLASINT Drl = 1;
    for (uint32_t i = 0; i < ir; i++) Drl *= btr.getRankDim(i);
    BLASINT Drm = btr.getRankDim(ir);
    BLASINT Drr = (btr.getDataSize()/Drl)/Drm;
    assert(dataSize == Dll*Dlr*Drl*Drr);
    assert(Dlm == Drm); // contracted dimensions must match
    DTTYPE* localtmpdata = reinterpret_cast<DTTYPE*>(tmpdata);
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    /*contraction*/
    if (cttype == 'S')
    {
        // Sequential contraction: rearrange both operands so one gemm computes the result
        char transl;
        char transr = 'T';
        BLASINT ldl;
        BLASINT ldr;
        if (il == 0)
        {
            transl = 'T';
            ldl = Dll*Dlr;
        }
        else if (il+1 == btl.getTensorRank())
        {
            transl = 'N';
            ldl = Dlm;
        }
        else
        {
            // Middle index on the left: move Dlm to the fastest position (btl is mutated, restored below)
            BLASINT dlmr = Dlm*Dlr;
            if (tdsize >= btl.getDataSize()*sizeof(DTTYPE))
            {
#pragma omp parallel for schedule(dynamic, 1)
                for (BLASINT l = 0; l < Dll; l++) llmopr::fastTransposeTo(Dlm, Dlr, btl.tensorData+l*dlmr, Dlr, localtmpdata+l*dlmr, Dlm);
                copy(btl.getDataSize(), localtmpdata, 1, btl.tensorData, 1);
            }
            else if (tdsize >= dlmr*sizeof(DTTYPE))
            {
                for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlm, Dlr, btl.tensorData+l*dlmr, localtmpdata);
            }
            else
            {
                std::cout << "warning : tmpdata is too small, tdsize is " << tdsize << ", it should be at leat " << dlmr*sizeof(DTTYPE) <<std::endl;
                for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlm, Dlr, btl.tensorData+l*dlmr);
            }
            transl = 'N';
            ldl = Dlm;
        }
        const DTTYPE* psour = btr.tensorData;
        if (ir == 0)
        {
            transr = 'N';
            ldr = Drl*Drr;
        }
        else if (ir+1 == btr.getTensorRank())
        {
            transr = 'T';
            ldr = Drm;
        }
        else
        {
            // Middle index on the right: same rearrangement for btr
            BLASINT drmr = Drm*Drr;
            if (tdsize >= btr.getDataSize()*sizeof(DTTYPE))
            {
#pragma omp parallel for schedule(dynamic, 1)
                for (BLASINT l = 0; l < Drl; l++) llmopr::fastTransposeTo(Drm, Drr, btr.tensorData+l*drmr, Drr, localtmpdata+l*drmr, Drm);
                psour = localtmpdata; // btr itself stays untouched in this branch
            }
            else if (tdsize >= drmr*sizeof(DTTYPE))
            {
                for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drm, Drr, btr.tensorData+l*drmr, localtmpdata);
            }
            else
            {
                std::cout << "warning : tmpdata is too small, tdsize is " << tdsize << ", it should be at least "<<drmr*sizeof(DTTYPE)<<std::endl;
                for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drm, Drr, btr.tensorData+l*drmr);
            }
            transr = 'T';
            ldr = Drm;
        }
        gemm(transl, transr, Dll*Dlr, Drl*Drr, Dlm, 1.0, btl.tensorData, ldl, psour, ldr, 0.0, tensorData, Drl*Drr);
        /*restore the data in btr*/
        if (ir != 0 && ir+1 != btr.getTensorRank() && tdsize < btr.getDataSize()*sizeof(DTTYPE))
        {
            BLASINT drmr = Drm*Drr;
            if (tdsize >= drmr*sizeof(DTTYPE))
            {
                for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drr, Drm, btr.tensorData+l*drmr, localtmpdata);
            }
            else
            {
                for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drr, Drm, btr.tensorData+l*drmr);
            }
        }
        /*restore the data in btl*/
        if (il != 0 && il+1 != btl.getTensorRank())
        {
            BLASINT dlmr = Dlm*Dlr;
            if (tdsize >= btl.getDataSize()*sizeof(DTTYPE))
            {
#pragma omp parallel for schedule(dynamic, 1)
                for (BLASINT l = 0; l < Dll; l++) llmopr::fastTransposeTo(Dlr, Dlm, btl.tensorData+l*dlmr, Dlm, localtmpdata+l*dlmr, Dlr);
                copy(btl.getDataSize(), localtmpdata, 1, btl.tensorData, 1);
            }
            else if (tdsize >= dlmr*sizeof(DTTYPE))
            {
                for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlr, Dlm, btl.tensorData+l*dlmr, localtmpdata);
            }
            else
            {
                for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlr, Dlm, btl.tensorData+l*dlmr);
            }
        }
    }
    else if (cttype == 'D')
    {
        // Direct-product-like contraction: special-case degenerate block dimensions
        // so a single gemm can be used whenever possible
        if (Drl == 1 && Dll == 1)
        {
            gemm('T', 'N', Dlr, Drr, Dlm, 1.0, btl.tensorData, Dlr, btr.tensorData, Drr, 0.0, tensorData, Drr);
        }
        else if (Drl == 1 && Dlr == 1)
        {
            gemm('N', 'N', Dll, Drr, Dlm, 1.0, btl.tensorData, Dlm, btr.tensorData, Drr, 0.0, tensorData, Drr);
        }
        else if (Drl == 1 && tdsize >= btl.getDataSize()*sizeof(DTTYPE))
        {
            llmopr::transposeMRto(Dll, Dlm, Dlr, btl.tensorData, localtmpdata);
            gemm('N', 'N', Dll*Dlr, Drr, Dlm, 1.0, localtmpdata, Dlm, btr.tensorData, Drr, 0.0, tensorData, Drr);
        }
        else if (Dlr == 1 && Drr == 1)
        {
            gemm('N', 'T', Dll, Drl, Dlm, 1.0, btl.tensorData, Dlm, btr.tensorData, Drm, 0.0, tensorData, Drl);
        }
        else if (Dlr == 1 && tdsize >= btr.getDataSize()*sizeof(DTTYPE))
        {
            llmopr::transposeLMto(Drl, Drm, Drr, btr.tensorData, localtmpdata);
            gemm('N', 'N', Dll, Drl*Drr, Dlm, 1.0, btl.tensorData, Dlm, localtmpdata, Drl*Drr, 0.0, tensorData, Drl*Drr);
        }
        else if (Dll == 1 && Drr == 1)
        {
            gemm('N', 'N', Drl, Dlr, Dlm, 1.0, btl.tensorData, Dlm, btr.tensorData, Drm, 0.0, tensorData, Dlr);
        }
        else if (Drr == 1)
        {
#pragma omp parallel for schedule(dynamic, 1)
            for (BLASINT l = 0; l < Dll; l++)
            {
                const DTTYPE* ltdata = btl.tensorData+l*Dlm*Dlr;
                DTTYPE* localdata = tensorData+l*Drl*Drr;
                gemm('N', 'N', Drl, Drr, Dlm, 1.0, ltdata, Dlr, btr.tensorData, Drm, 0.0, localdata, Drr);
            }
        }
        else
        {
            // General case: one independent gemm per (left-block, right-block) pair
#pragma omp parallel for schedule(dynamic, 1)
            for (BLASINT lr = 0; lr < Dll*Drl; lr++)
            {
                const BLASINT l = lr/Drl;
                const BLASINT r = lr%Drl;
                const DTTYPE* ltdata = btl.tensorData+l*Dlm*Dlr;
                const DTTYPE* rtdata = btr.tensorData+r*Drm*Drr;
                DTTYPE* localdata = tensorData+(l*Drl+r)*Dlr*Drr;
                gemm('T', 'N', Dlr, Drr, Dlm, 1.0, ltdata, Dlr, rtdata, Drr, 0.0, localdata, Drr);
            }
        }
    }
    else // (cttype == 'I')
    {
        // Insert-right-into-left ordering; parallelize only when there is enough outer work
        if (Dll*Drl > 10)
        {
#pragma omp parallel for schedule(dynamic, 1)
            for (BLASINT lr = 0; lr < Dll*Drl; lr++)
            {
                const BLASINT l = lr/Drl;
                const BLASINT r = lr%Drl;
                const DTTYPE* ltdata = btl.tensorData+l*Dlm*Dlr;
                const DTTYPE* rtdata = btr.tensorData+r*Drm*Drr;
                DTTYPE* localdata = tensorData+(l*Drl+r)*Drr*Dlr;
                gemm('T', 'N', Drr, Dlr, Dlm, 1.0, rtdata, Drr, ltdata, Dlr, 0.0, localdata, Dlr);
            }
        }
        else
        {
            for (BLASINT lr = 0; lr < Dll*Drl; lr++)
            {
                const BLASINT l = lr/Drl;
                const BLASINT r = lr%Drl;
                const DTTYPE* ltdata = btl.tensorData+l*Dlm*Dlr;
                const DTTYPE* rtdata = btr.tensorData+r*Drm*Drr;
                DTTYPE* localdata = tensorData+(l*Drl+r)*Drr*Dlr;
                gemm('T', 'N', Drr, Dlr, Dlm, 1.0, rtdata, Drr, ltdata, Dlr, 0.0, localdata, Dlr);
            }
        }
    }
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
#ifdef DEBUG_BASETENSOR_TIME_cbibitt
    std::cout<<"***Time on BaseTensor(cbibitt) is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl<<std::endl;
#endif
}
/*internal index contraction (partial trace): T'_{l m r} = sum_k T_{l k m k r},
 *where k runs over the (equal) dimensions of indices il and ir (il < ir)*/
template<typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(const BaseTensor &bt, const uint32_t &il, const uint32_t &ir) : TensorStruct(bt, il, ir)
{
    assert(il < ir);
    BLASINT Dl = 1; // product of dimensions left of il
    for (uint32_t i = 0; i < il; i++) Dl *= bt.getRankDim(i);
    BLASINT Dm = 1; // product of dimensions between il and ir
    for (uint32_t i = il+1; i < ir; i++) Dm *= bt.getRankDim(i);
    BLASINT Dr = 1; // product of dimensions right of ir
    for (uint32_t i = ir+1; i < bt.getTensorRank(); i++) Dr *= bt.getRankDim(i);
    BLASINT Dil = bt.getRankDim(il);
    assert(Dil == bt.getRankDim(ir)); // traced indices must have equal dimension
    maxDataSize = dataSize;
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    for (BLASINT l = 0; l < Dl; l++)
    {
        for (BLASINT m = 0; m < Dm; m++)
        {
            for (BLASINT r = 0; r < Dr; r++)
            {
                DTTYPE sum = 0.0;
                // Source offset places k at both the il and ir positions of bt's layout
                for (BLASINT k = 0; k < Dil; k++) sum += bt.tensorData[(((l*Dil+k)*Dm+m)*Dil+k)*Dr+r];
                tensorData[(l*Dm+m)*Dr+r] = sum;
            }
        }
    }
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*direct product of two tensors with the same rank; matching indices of btl and btr
 *are interleaved and then pairwise combined into single indices of the result*/
template<typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(const BaseTensor &btl, const BaseTensor &btr, void* tmpdata, const BLASINT &tdsize)
{
    assert(btl.tensorRank == btr.tensorRank);
    tensorRank = btl.tensorRank + btr.tensorRank;
    assert(tensorRank <= MAXTRK);
    // Concatenated dimensions: btl's indices first, then btr's
    for (unsigned i = 0; i < btl.tensorRank; i++) rankDim[i] = btl.rankDim[i];
    for (unsigned i = 0; i < btr.tensorRank; i++) rankDim[btl.tensorRank+i] = btr.rankDim[i];
    dataSize = btl.dataSize * btr.dataSize;
    maxDataSize = dataSize;
    /*tensorData should not be nullptr*/
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    // Outer product: every element of btl times every element of btr
    for (BLASINT i = 0; i < btl.dataSize; i++)
    {
        for (BLASINT j = 0; j < btr.dataSize; j++) tensorData[i*btr.dataSize+j] = btl.tensorData[i] * btr.tensorData[j];
    }
    // Interleave btr's indices with btl's, then fuse each adjacent pair into one index
    // (presumably yielding the T_{iq;jw;...} layout described in the declaration — TODO confirm)
    for (unsigned i = 0; i+1 < btr.tensorRank; i++) shiftBefore(btl.tensorRank+i, 2*i+1, tmpdata, tdsize);
    for (unsigned i = 0; i < btl.tensorRank; i++) rankCombination(i, 2);
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*copy constructor: deep-copies the data, optionally applying a scale factor*/
template <typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(const BaseTensor &bt, const DTTYPE &scale) : TensorStruct(bt)
{
    fileSize = bt.fileSize;
    maxDataSize = dataSize;
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    memcpy(tensorData, bt.tensorData, dataSize*sizeof(DTTYPE));
    // Skip the no-op scaling for the default scale, consistent with the move constructor
    if (scale != 1.0) scal(dataSize, scale, tensorData, 1);
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*move constructor: steals bt's buffer, optionally applying a scale factor*/
template<typename DTTYPE>
BaseTensor<DTTYPE>::BaseTensor(BaseTensor<DTTYPE> &&bt, const DTTYPE &scale) : TensorStruct(bt)
{
    // BUGFIX: removed leftover debug print ("move base to base") that polluted
    // stdout on every move construction
    fileSize = bt.fileSize;
    maxDataSize = bt.getMaxDataSize();
    tensorData = bt.tensorData; // take ownership of the buffer
    if (scale != 1.0) scal(this->dataSize, scale, tensorData, 1);
    // Leave the source empty but safely destructible
    bt.tensorData = nullptr;
    bt.dataSize = 0;
    bt.maxDataSize = 0;
#ifdef CHECK_BASETENSOR
    this->checkBaseTensor();
#endif
}
/*destructor: releases the data buffer*/
template<typename DTTYPE>
BaseTensor<DTTYPE>::~BaseTensor()
{
    // delete[] on a null pointer is a no-op, so no guard is required
    delete []tensorData;
    tensorData = nullptr;
}
/*reallocate tensorData with capacity maxsize; the old contents are NOT preserved*/
template<typename DTTYPE>
void BaseTensor<DTTYPE>::reSetMaxDataSize(const BLASINT &maxsize)
{
    maxDataSize = maxsize;
    delete []tensorData;
    tensorData = new(std::nothrow) DTTYPE[maxDataSize];
    assert(tensorData);
    // NOTE(review): dataSize is not adjusted here — if maxsize < dataSize the object
    // is left in an inconsistent state; presumably callers resize afterwards. TODO confirm.
}
/*********************initialize tensorData of a tensor*****************/
/*fill with uniform random complex numbers, real and imaginary parts in [min, max)*/
template<typename DTTYPE> template<typename T>
typename std::enable_if<istc::isComplex<T>::value && std::is_same<T, DTTYPE>::value, bool>::type BaseTensor<DTTYPE>::fill(const double &min, const double &max, const uint32_t &rdtype)
{
    if (max <= min)
    {
        std::cout << " Error: max should be larger than min!" << std::endl;
        return false;
    }
    const double width = max - min;
    for (BLASINT k = 0; k < dataSize; k++)
    {
        const double re = drand48()*width + min;
        const double im = drand48()*width + min;
        tensorData[k] = DTTYPE(re, im);
    }
    return true;
}
/*Random initialization, real-valued overload: elements are drawn uniformly
 * from [min, max). Fails when max <= min.
 */
template<typename DTTYPE> template<typename T>
typename std::enable_if<(!istc::isComplex<T>::value) && std::is_same<T, DTTYPE>::value, bool>::type BaseTensor<DTTYPE>::fill(const double &min, const double &max, const uint32_t &rdtype)
{
    if (max <= min)
    {
        std::cout << " Error: max should be larger than min!" << std::endl;
        return false;
    }
    const double span = max - min;
    for (BLASINT k = 0; k < dataSize; k++)
    {
        tensorData[k] = drand48()*span + min;
    }
    return true;
}
template<typename DTTYPE>
void BaseTensor<DTTYPE>::fill(const DTTYPE &el)
{
    /// Set every element of the tensor to the constant "el".
    DTTYPE* p = tensorData;
    for (BLASINT k = 0; k < dataSize; k++) *p++ = el;
}
/*Initialize the tensor from a raw array "data" of length "dtsize".
 * Fails when fewer than dataSize values are supplied. When the source type
 * matches DTTYPE the copy is a single memcpy; otherwise an elementwise
 * converting assignment is used.
 * Fix: the type check is now the compile-time std::is_same (already used in
 * this file) instead of runtime typeid/RTTI.
 */
template<typename DTTYPE> template<typename T>
bool BaseTensor<DTTYPE>::fill(const T* data, const BLASINT &dtsize)
{
    if (dataSize > dtsize)
    {
        std::cout << " Error: number of input data is not enough!" << std::endl;
        return false;
    }
    if (std::is_same<T, DTTYPE>::value)
    {
        memcpy(tensorData, data, dataSize*sizeof(DTTYPE));
    }
    else
    {
        for (BLASINT i = 0; i < dataSize; i++) tensorData[i] = data[i];
    }
    return true;
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::resize(std::initializer_list<BLASINT> rd)
{
    /// Reshape the tensor to the dimensions in "rd"; an empty list means a
    /// rank-0 scalar (dataSize == 1). The buffer is reallocated only when it
    /// must grow; old contents are discarded in that case.
    if (rd.size() == 0) // rd = {}, empty
    {
        tensorRank = 0;
        dataSize = 1;
    }
    else
    {
        // First pass: reject non-positive dimensions before touching state.
        for (const BLASINT &d : rd)
        {
            if (d <= 0)
            {
                std::cout << " negative dimension!" << std::endl;
                return false;
            }
        }
        // Second pass: record dimensions and accumulate the element count.
        tensorRank = 0;
        dataSize = 1;
        for (const BLASINT &d : rd)
        {
            rankDim[tensorRank] = d;
            dataSize *= d;
            tensorRank++;
        }
    }
    if (dataSize > maxDataSize)
    {
        maxDataSize = 32*(1+dataSize/32); // round capacity up to a multiple of 32
        assert (tensorData != nullptr);
        delete []tensorData;
        tensorData = new(std::nothrow) DTTYPE[maxDataSize];
        assert(tensorData);
    }
    setFileSize();
    return true;
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::resize(const uint32_t &tr, const BLASINT* rd)
{
    /// Reshape to rank "tr" with dimensions rd[0..tr-1]; tr == 0 yields a
    /// rank-0 scalar. Reallocates only when the buffer must grow.
    if (tr == 0)
    {
        tensorRank = 0;
        dataSize = 1;
    }
    else
    {
        // Validate every dimension before mutating any member.
        for (uint32_t k = 0; k < tr; k++)
        {
            if (rd[k] <= 0)
            {
                std::cout << " Error: wrong dimension!" << std::endl;
                return false;
            }
        }
        tensorRank = 0;
        dataSize = 1;
        for (uint32_t k = 0; k < tr; k++)
        {
            rankDim[tensorRank] = rd[k];
            dataSize *= rd[k];
            tensorRank++;
        }
    }
    if (dataSize > maxDataSize)
    {
        maxDataSize = 32*(1+dataSize/32); // capacity rounded up to a multiple of 32
        assert (tensorData != nullptr);
        delete []tensorData;
        tensorData = new(std::nothrow) DTTYPE[maxDataSize];
        assert(tensorData);
    }
    setFileSize();
    return true;
}
/*Shrink dimension "idx" to "resdim" elements, compacting the data buffer
 * in place. Only the first and the last leg are supported; truncating an
 * interior leg aborts.
 */
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::truncateRankDim(const uint32_t &idx, const BLASINT &resdim)
{
    if (idx >= tensorRank || resdim <= 0 || resdim > rankDim[idx])
    {
        std::cout << " Error: parameter is wrong!" << std::endl;
        return false;
    }
    if (resdim == rankDim[idx]) return true; // nothing to truncate
    if (idx == 0) // first one
    {
        // Leading leg: the kept block is already contiguous at the front,
        // so only the bookkeeping changes.
        dataSize /= rankDim[idx];
        rankDim[idx] = resdim;
        dataSize *= rankDim[idx];
    }
    else if (idx+1 == tensorRank) // last one
    {
        // Trailing leg: slide each row of length dcol left, keeping its
        // first resdim entries. memmove is required because source and
        // destination ranges can overlap; row 0 is already in place.
        BLASINT dcol = rankDim[idx];
        BLASINT drow = dataSize/dcol;
        for (BLASINT i = 1; i < drow; i++)
        {
            DTTYPE* dest = tensorData + i*resdim;
            DTTYPE* sour = tensorData + i*dcol;
            memmove(dest, sour, resdim*sizeof(DTTYPE));
        }
        dataSize /= rankDim[idx];
        rankDim[idx] = resdim;
        dataSize *= rankDim[idx];
    }
    else // middle
    {
        // Truncating an interior leg is not implemented.
        assert(0);
    }
    setFileSize();
    return true;
}
/*********************end of the initializer****************************/
/*Copy assignment: reshape to match "bt", then copy its elements.
 * Fix: the original wrapped resize() inside assert(), so under NDEBUG the
 * reshape was compiled away and memcpy wrote into a possibly undersized
 * buffer; the call is now unconditional. A self-assignment guard is added.
 */
template<typename DTTYPE>
BaseTensor<DTTYPE> &BaseTensor<DTTYPE>::operator = (const BaseTensor &bt)
{
    if (this == &bt) return *this; // self-assignment guard
    const bool ok = this->resize(bt.tensorRank, bt.rankDim);
    assert(ok);
    (void)ok; // silence unused-variable warning under NDEBUG
    memcpy(tensorData, bt.tensorData, dataSize*sizeof(DTTYPE));
    return *this;
}
template<typename DTTYPE>
BaseTensor<DTTYPE> &BaseTensor<DTTYPE>::operator *= (const DTTYPE &s)
{
    /// In-place elementwise scaling; multiplying by exactly 1 is a no-op.
    if (s != 1.0) scal(dataSize, s, tensorData, 1);
    return *this;
}
template<typename DTTYPE>
BaseTensor<DTTYPE> &BaseTensor<DTTYPE>::operator /= (const DTTYPE &s)
{
    /// In-place elementwise division, implemented as scaling by 1/s;
    /// dividing by exactly 1 is a no-op.
    if (s != 1.0) scal(dataSize, 1.0/s, tensorData, 1);
    return *this;
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::operator == (const BaseTensor &bt) noexcept
{
    /// Tensors compare equal when rank, every dimension, and every element
    /// (within an absolute tolerance of 1e-12) agree.
    if (tensorRank != bt.tensorRank) return false;
    for (uint32_t r = 0; r < tensorRank; r++)
    {
        if (rankDim[r] != bt.rankDim[r]) return false;
    }
    for (BLASINT k = 0; k < dataSize; k++)
    {
        if (dabs(tensorData[k]-bt.tensorData[k]) > 1.0e-12) return false;
    }
    return true;
}
/*1. T_ijk = T_ijk * lambda[j]
 * no summation over "j" — multiply leg "il" elementwise by lambda.
 */
template<typename DTTYPE> template <typename TLP>
bool BaseTensor<DTTYPE>::legProduct(const uint32_t &il, const TLP* lambda, const BLASINT &lmbdsize)
{
    // Split the index space into (left, middle, right) with leg "il" in the
    // middle: element (l, m, r) lives at offset (l*Dm + m)*Dr + r.
    BLASINT Dl = 1;
    for (uint32_t i = 0; i < il; i++) Dl *= rankDim[i];
    const BLASINT Dm = rankDim[il];
    const BLASINT Dr = (dataSize/Dl)/Dm;
#ifdef CHECK_VECTOR_SIZE
    assert(lmbdsize >= Dm);
#endif
    if (Dr == 1)
    {
        // Rightmost leg: contiguous rows of length Dm.
        DTTYPE* row = tensorData;
        for (BLASINT a = 0; a < Dl; a++, row += Dm)
        {
            for (BLASINT b = 0; b < Dm; b++) row[b] *= lambda[b];
        }
    }
    else
    {
        // General case: each (a, b) selects a contiguous segment of length Dr.
        DTTYPE* seg = tensorData;
        for (BLASINT a = 0; a < Dl; a++)
        {
            for (BLASINT b = 0; b < Dm; b++, seg += Dr)
            {
                for (BLASINT c = 0; c < Dr; c++) seg[c] *= lambda[b];
            }
        }
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    return true;
}
/*Vector overload of legProduct: multiply leg "il" elementwise by the
 * entries of "lambda" (no summation); fails when sizes disagree.
 */
template<typename DTTYPE> template<typename TLP>
bool BaseTensor<DTTYPE>::legProduct(const uint32_t &il, const Vector<TLP> &lambda)
{
    if (this->getRankDim(il) != lambda.getDataSize())
    {
        std::cout << " Error: size does not match!" << std::endl;
        return false;
    }
    BLASINT Dl = 1;
    for (uint32_t i = 0; i < il; i++) Dl *= rankDim[i];
    const BLASINT Dm = rankDim[il];
    const BLASINT Dr = (dataSize/Dl)/Dm;
    const TLP* w = lambda.tensorData;
    if (Dr == 1)
    {
        // Rightmost leg: contiguous rows of length Dm.
        DTTYPE* row = tensorData;
        for (BLASINT a = 0; a < Dl; a++, row += Dm)
        {
            for (BLASINT b = 0; b < Dm; b++) row[b] *= w[b];
        }
    }
    else
    {
        DTTYPE* seg = tensorData;
        for (BLASINT a = 0; a < Dl; a++)
        {
            for (BLASINT b = 0; b < Dm; b++, seg += Dr)
            {
                for (BLASINT c = 0; c < Dr; c++) seg[c] *= w[b];
            }
        }
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    return true;
}
/*2. T_ijk = T_ijk / lambda[j]
 * no summation over "j" — divide leg "il" elementwise by lambda.
 */
template<typename DTTYPE> template<typename TLP>
bool BaseTensor<DTTYPE>::legDivision(const uint32_t &il, const TLP* lambda, const BLASINT &lmbdsize)
{
    // (left, middle, right) factorization around leg "il"; element (l, m, r)
    // lives at offset (l*Dm + m)*Dr + r.
    BLASINT Dl = 1;
    for (uint32_t i = 0; i < il; i++) Dl *= rankDim[i];
    const BLASINT Dm = rankDim[il];
    const BLASINT Dr = (dataSize/Dl)/Dm;
#ifdef CHECK_VECTOR_SIZE
    assert(lmbdsize >= Dm);
#endif
    DTTYPE* seg = tensorData;
    for (BLASINT a = 0; a < Dl; a++)
    {
        for (BLASINT b = 0; b < Dm; b++, seg += Dr)
        {
            for (BLASINT c = 0; c < Dr; c++) seg[c] /= lambda[b];
        }
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    return true;
}
/*Vector overload of legDivision: divide leg "il" elementwise by the
 * entries of "lambda" (no summation); fails when sizes disagree.
 */
template<typename DTTYPE> template<typename TLP>
bool BaseTensor<DTTYPE>::legDivision(const uint32_t &il, const Vector<TLP> &lambda)
{
    if (this->getRankDim(il) != lambda.getDataSize())
    {
        std::cout << " Error: size does not match!" << std::endl;
        return false;
    }
    BLASINT Dl = 1;
    for (uint32_t i = 0; i < il; i++) Dl *= rankDim[i];
    const BLASINT Dm = rankDim[il];
    const BLASINT Dr = (dataSize/Dl)/Dm;
    const TLP* w = lambda.tensorData;
    DTTYPE* seg = tensorData;
    for (BLASINT a = 0; a < Dl; a++)
    {
        for (BLASINT b = 0; b < Dm; b++, seg += Dr)
        {
            for (BLASINT c = 0; c < Dr; c++) seg[c] /= w[b];
        }
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    return true;
}
/*shift index "idx" before "iprev": head 0, 1, 2, 3, 4, 5, ... tail*/
/*i.e. insert index "idx" immediately in front of position "iprev"*/
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftBefore(const uint32_t &idx, const uint32_t &iprev, void* tmpdata, const BLASINT &tdsize)
{
    /// Move index "idx" so it sits directly before index "iprev", using
    /// "tmpdata" as scratch. Delegates on the direction of the move.
    assert(idx < tensorRank && iprev < tensorRank);
    if (iprev == idx || iprev == idx+1) return true; // already in place
    if (idx > iprev) return shiftBefore_(idx, iprev, tmpdata, tdsize);
    return shiftAfter_(idx, iprev-1, tmpdata, tdsize);
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftBefore(const uint32_t &idx, const uint32_t &iprev, BaseTensor<DTTYPE> &bt) const
{
    /// Non-mutating variant: the shifted tensor is written into "bt".
    assert(idx < tensorRank && iprev < tensorRank);
    if (iprev == idx || iprev == idx+1)
    {
        bt = *this; // no movement needed; the result is a plain copy
        return true;
    }
    if (idx > iprev) return shiftBefore_(idx, iprev, bt);
    return shiftAfter_(idx, iprev-1, bt);
}
/*shift index "idx" after "iback": head 0, 1, 2, 3, 4, ... tail*/
/*i.e. insert index "idx" immediately after position "iback"*/
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftAfter(const uint32_t &idx, const uint32_t &iback, void* tmpdata, const BLASINT &tdsize)
{
    /// Move index "idx" so it sits directly after index "iback", using
    /// "tmpdata" as scratch. Delegates on the direction of the move.
    assert (idx < tensorRank && iback < tensorRank);
    if (iback == idx || iback+1 == idx) return true; // already in place
    if (idx < iback) return shiftAfter_(idx, iback, tmpdata, tdsize);
    return shiftBefore_(idx, iback+1, tmpdata, tdsize);
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftAfter(const uint32_t &idx, const uint32_t &iback, BaseTensor<DTTYPE> &bt) const
{
    /// Non-mutating variant: the shifted tensor is written into "bt".
    assert (idx < tensorRank && iback < tensorRank);
    if (iback == idx || iback+1 == idx)
    {
        bt = *this; // no movement needed; the result is a plain copy
        return true;
    }
    if (idx < iback) return shiftAfter_(idx, iback, bt);
    return shiftBefore_(idx, iback+1, bt);
}
/*permute the index of the tensor : it is very slow*/
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::permute(std::initializer_list<uint32_t> ri, void* tmpdata, const BLASINT &tdsize)
{
    // NOTE(review): this routine is effectively disabled — it prints a debug
    // message and asserts(0) unconditionally before doing any work. The code
    // below realizes the permutation as a sequence of shiftBefore() moves
    // (selection-sort style), hence the "very slow" in the header comment.
    std::cout << "in prepare" << std::endl;
    assert(0);
    // Copy the requested target ordering into a fixed-size array.
    uint32_t targetindex[MAXTRK];
    uint32_t localrank = 0;
    for (auto p = ri.begin(); p < ri.end(); p++)
    {
        targetindex[localrank] = *p;
        localrank++;
    }
    /*check: the list must have exactly tensorRank entries and name every
     * index 0..tensorRank-1 exactly once*/
    assert(localrank == tensorRank);
    for (uint32_t i = 0; i < tensorRank; i++)
    {
        bool matched = false;
        for (uint32_t j = 0; j < tensorRank; j++) if (i == targetindex[j]) matched = true;
        if (!matched) assert(0);
    }
    /*check finished*/
    // presentindex tracks where each original index currently sits.
    uint32_t presentindex[MAXTRK];
    for (uint32_t i = 0; i < tensorRank; i++) presentindex[i] = i;
    uint32_t index = 0;
    // Repeatedly bring the next wanted index into slot "index".
    while (index+1 < tensorRank)
    {
        uint32_t tindex = targetindex[index];
        uint32_t pindex = 0;
        for (uint32_t i = index; i < tensorRank; i++)
        {
            if (tindex == presentindex[i])
            {
                pindex = i;
                break;
            }
        }
        if (pindex > index)
        {
            // Update the bookkeeping first, then move the actual data.
            for (uint32_t i = pindex; i > index; i--) presentindex[i] = presentindex[i-1];
            presentindex[index] = tindex;
            shiftBefore(pindex, index, tmpdata, tdsize);
        }
        index++;
    }
    for (uint32_t i = 0; i < tensorRank; i++) assert(targetindex[i] == presentindex[i]);
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    return true;
}
/*move the leg to the leftmost and perform singular value decomposition, T = U\lambda V^T,
 * Utype = 'S' or 'A',
 * thistype = 'O', 'K', or 'D'
 * V^T is stored in this tensor if "thistype = 'O'" => overwritten
 * this tensor is kept when thistype = 'K' => kept
 * this tensor is destroyed when thistype = 'D' => destroyed
 * Returns the info code from the underlying gesvd call (0 on success).
 * Fixes: the pool-size assert now compares bytes against bytes (matching
 * rightSVD); resize() calls are no longer wrapped in assert(), which
 * compiled them away entirely under NDEBUG.
 */
template<typename DTTYPE> template<typename TSR, typename> BLASINT BaseTensor<DTTYPE>::leftSVD(const char &thistype, const uint32_t &index, const char &Utype, Vector<RDTTYPE> &lambda, TSR &U, void* pool, const BLASINT &poolsize)
{
#ifdef DEBUG_LEFTSVD_TIME
    clock_t time_begin = clock();
#endif
    assert(index < tensorRank);
    assert(thistype == 'O' || thistype == 'K' || thistype == 'D');
    assert(Utype == 'A' || Utype == 'S');
    // Factor the index space as (left, middle, right) around leg "index";
    // the SVD sees the tensor as a dimm x (diml*dimr) matrix.
    BLASINT diml = 1;
    for (BLASINT i = 0; i < index; i++) diml *= rankDim[i];
    BLASINT dimm = rankDim[index];
    BLASINT dimr = 1;
    for (BLASINT i = index+1; i < tensorRank; i++) dimr *= rankDim[i];
    const BLASINT dimlr = diml*dimr;
    assert(dimlr*dimm == dataSize);
    const BLASINT mindim = dimm<dimlr?dimm:dimlr;
    BLASINT dcolU; // column of U
    if (Utype == 'A') dcolU = dimm;
    else dcolU = mindim;
    DTTYPE* psour;
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // poolsize is a byte count (see the workspace arithmetic below and the
    // matching check in rightSVD); the original compared it against the
    // element count dataSize, which under-checked the pool.
    assert(poolsize >= dataSize*sizeof(DTTYPE));
    BLASINT extdtsize = 0;
    if (thistype == 'K')
    {
        // Keep mode: operate on a transposed scratch copy, leave *this intact.
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        psour = tmpdata;
        extdtsize = dataSize;
    }
    else
    {
        // Overwrite/destroy mode: bring leg "index" to the front in place.
        shiftBefore(index, 0, tmpdata, dataSize*sizeof(DTTYPE));
        psour = tensorData;
    }
    char VDtype = 'N';
    if (thistype == 'O') VDtype = 'O'; // 'O': V^T overwrites the source matrix
    BLASINT info;
    DTTYPE VT[16];// just a variable, not referenced,
    const bool lok = lambda.resize({dcolU});
    assert(lok);
    (void)lok;
    lambda.fill(0);
    const bool uok = U.resize({dimm, dcolU});
    assert(uok);
    (void)uok;
    miolpk::miogesvd(Utype, VDtype, dimm, dimlr, psour, dimlr, lambda.tensorData, U.tensorData, dcolU, VT, 2, info, tmpdata+extdtsize, poolsize-extdtsize*sizeof(DTTYPE));
    if (thistype == 'O')
    {
        // V^T replaced our data: the leading dimension shrinks to mindim.
        dataSize /= rankDim[0];
        rankDim[0] = mindim; // min(dimm, dimlr)
        dataSize *= rankDim[0];
        setFileSize(); //correct
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
#ifdef DEBUG_LEFTSVD_TIME
    std::cout<<"Time on leftSVD is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
    return info;
}
/*Convenience overload of leftSVD that allocates and returns (U, lambda) as a
 * tuple instead of filling caller-provided outputs. Aborts (assert) when the
 * underlying gesvd reports failure.
 * Fix: the pool-size assert now compares bytes against bytes, matching the
 * workspace arithmetic below and the sibling rightSVD overload.
 */
template<typename DTTYPE>
auto BaseTensor<DTTYPE>::leftSVD(const char &thistype, const uint32_t &index, const char &Utype, void* pool, const BLASINT &poolsize)
{
    assert(index < tensorRank);
    assert(thistype == 'O' || thistype == 'K' || thistype == 'D');
    assert(Utype == 'A' || Utype == 'S');
    // (left, middle, right) factorization around leg "index".
    BLASINT diml = 1;
    for (BLASINT i = 0; i < index; i++) diml *= rankDim[i];
    BLASINT dimm = rankDim[index];
    BLASINT dimr = 1;
    for (BLASINT i = index+1; i < tensorRank; i++) dimr *= rankDim[i];
    const BLASINT dimlr = diml*dimr;
    assert(dimlr*dimm == dataSize);
    const BLASINT mindim = dimm<dimlr?dimm:dimlr;
    BLASINT dcolU; // column of U
    if (Utype == 'A') dcolU = dimm;
    else dcolU = mindim;
    DTTYPE* psour;
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // poolsize is in bytes; the original compared it to the element count.
    assert(poolsize >= dataSize*sizeof(DTTYPE));
    if (thistype == 'K')
    {
        // Keep mode: operate on a transposed scratch copy, leave *this intact.
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        psour = tmpdata;
    }
    else
    {
        // Overwrite/destroy mode: bring leg "index" to the front in place.
        shiftBefore(index, 0, tmpdata, dataSize*sizeof(DTTYPE));
        psour = tensorData;
    }
    char VDtype = 'N';
    if (thistype == 'O') VDtype = 'O'; // 'O': V^T overwrites the source matrix
    BLASINT info;
    DTTYPE VT[16];// just a variable, not referenced,
    Vector<RDTTYPE> lambda(dcolU);
    lambda.fill(0);
    BaseTensor<DTTYPE> U({dimm, dcolU});
    miolpk::miogesvd(Utype, VDtype, dimm, dimlr, psour, dimlr, lambda.tensorData, U.tensorData, dcolU, VT, 2, info, tmpdata+dataSize, poolsize-dataSize*sizeof(DTTYPE));
    if (thistype == 'O')
    {
        // V^T replaced our data: the leading dimension shrinks to mindim.
        dataSize /= rankDim[0];
        rankDim[0] = mindim; // min(dimm, dimlr)
        dataSize *= rankDim[0];
        setFileSize(); //correct
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    assert(info == 0);
    return std::tuple<BaseTensor<DTTYPE>, Vector<RDTTYPE>>(U, lambda);
}
/*move the leg to the rightmost and perform singular value decomposition, T = U\lambda V^T
 * VDtype = 'S' or 'A', U is stored locally.
 * thistype = 'O' overwrite / 'K' keep / 'D' destroy, as in leftSVD.
 * Returns the info code from the underlying gesvd call (0 on success).
 * Fix: lambda.resize() is no longer wrapped in assert(), which compiled the
 * call away entirely under NDEBUG.
 */
template<typename DTTYPE> template<typename TSR, typename>
BLASINT BaseTensor<DTTYPE>::rightSVD(const char &thistype, const uint32_t &index, const char &VDtype, Vector<RDTTYPE> &lambda, TSR &VD, void* pool, const BLASINT &poolsize)
{
#ifdef DEBUG_RIGHTSVD_TIME
    clock_t time_begin = clock();
#endif
    assert(index < tensorRank && tensorRank >= 2);
    assert(thistype == 'O' || thistype == 'K' || thistype == 'D');
    assert(VDtype == 'A' || VDtype == 'S');
    // (left, middle, right) factorization around leg "index"; the SVD sees
    // the tensor as a (diml*dimr) x dimm matrix.
    BLASINT diml = 1;
    for (BLASINT i = 0; i < index; i++) diml *= rankDim[i];
    BLASINT dimm = rankDim[index];
    BLASINT dimr = 1;
    for (BLASINT i = index+1; i < tensorRank; i++) dimr *= rankDim[i];
    const BLASINT dimlr = diml*dimr;
    assert(dimlr*dimm == dataSize);
    const BLASINT mindim = dimm<dimlr?dimm:dimlr;
    BLASINT drowVD;
    if (VDtype == 'A') drowVD = dimm;
    else drowVD = mindim;
    DTTYPE* psour;
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    assert(poolsize >= dataSize*sizeof(DTTYPE));
    BLASINT extdtsize = 0;
    if (thistype == 'K')
    {
        // Keep mode: operate on a transposed scratch copy, leave *this intact.
        llmopr::transposeMRto(diml, dimm, dimr, tensorData, tmpdata);
        psour = tmpdata;
        extdtsize = dataSize;
    }
    else
    {
        // Overwrite/destroy mode: bring leg "index" to the back in place.
        shiftAfter(index, tensorRank-1, tmpdata, dataSize*sizeof(DTTYPE));
        psour = tensorData;
    }
    char Utype = 'N';
    if (thistype == 'O') Utype = 'O'; // 'O': U overwrites the source matrix
    BLASINT info;
    DTTYPE U[16]; // not referenced
    const bool lok = lambda.resize({drowVD});
    assert(lok);
    (void)lok;
    lambda.fill(0);
    VD.resize({drowVD, dimm});
    miolpk::miogesvd(Utype, VDtype, dimlr, dimm, psour, dimm, lambda.tensorData, U, 2, VD.tensorData, dimm, info, tmpdata+extdtsize, poolsize-extdtsize*sizeof(DTTYPE));
    if (thistype == 'O')
    {
        // U replaced our data: the trailing dimension shrinks to mindim.
        dataSize /= rankDim[tensorRank-1];
        rankDim[tensorRank-1] = mindim; // min(dimm, dimlr)
        dataSize *= rankDim[tensorRank-1];
        setFileSize(); //correct
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
#ifdef DEBUG_RIGHTSVD_TIME
    std::cout<<"Time on rightSVD is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
    return info;
}
/*Convenience overload of rightSVD that allocates and returns
 * (lambda, VD) as a tuple. Aborts (assert) on gesvd failure.
 */
template<typename DTTYPE>
auto BaseTensor<DTTYPE>::rightSVD(const char &thistype, const uint32_t &index, const char &VDtype, void* pool, const BLASINT &poolsize)
{
    assert(index < tensorRank && tensorRank >= 2);
    assert(thistype == 'O' || thistype == 'K' || thistype == 'D');
    assert(VDtype == 'A' || VDtype == 'S');
    // (left, middle, right) factorization around leg "index"; the SVD sees
    // the tensor as a (diml*dimr) x dimm matrix.
    BLASINT diml = 1;
    for (BLASINT i = 0; i < index; i++) diml *= rankDim[i];
    BLASINT dimm = rankDim[index];
    BLASINT dimr = 1;
    for (BLASINT i = index+1; i < tensorRank; i++) dimr *= rankDim[i];
    const BLASINT dimlr = diml*dimr;
    assert(dimlr*dimm == dataSize);
    const BLASINT mindim = dimm<dimlr?dimm:dimlr;
    BLASINT drowVD;
    if (VDtype == 'A') drowVD = dimm;
    else drowVD = mindim;
    DTTYPE* psour;
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    assert(poolsize >= dataSize*sizeof(DTTYPE)); // poolsize is in bytes
    if (thistype == 'K')
    {
        // Keep mode: operate on a transposed scratch copy, leave *this intact.
        llmopr::transposeMRto(diml, dimm, dimr, tensorData, tmpdata);
        psour = tmpdata;
    }
    else
    {
        // Overwrite/destroy mode: bring leg "index" to the back in place.
        shiftAfter(index, tensorRank-1, tmpdata, dataSize*sizeof(DTTYPE));
        psour = tensorData;
    }
    char Utype = 'N';
    if (thistype == 'O') Utype = 'O'; // 'O': U overwrites the source matrix
    BLASINT info;
    DTTYPE U[16]; // not referenced
    Vector<RDTTYPE> lambda(drowVD);
    lambda.fill(0);
    BaseTensor<DTTYPE> VD({drowVD, dimm});
    miolpk::miogesvd(Utype, VDtype, dimlr, dimm, psour, dimm, lambda.tensorData, U, 2, VD.tensorData, dimm, info, tmpdata+dataSize, poolsize-dataSize*sizeof(DTTYPE));
    if (thistype == 'O')
    {
        // U replaced our data: the trailing dimension shrinks to mindim.
        dataSize /= rankDim[tensorRank-1];
        rankDim[tensorRank-1] = mindim; // min(dimm, dimlr)
        dataSize *= rankDim[tensorRank-1];
        setFileSize(); //correct
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
    assert(info == 0);
    // NOTE(review): the tuple copies lambda and VD; a std::move would avoid
    // that if BaseTensor/Vector have plain move constructors — verify.
    return std::tuple<Vector<RDTTYPE>, BaseTensor<DTTYPE> >(lambda, VD);
}
/*leftSVDD : SVD using gesdd (divide-and-conquer). For more information, see
 * the manual of lapack. Leg "index" becomes the row dimension; U, lambda and
 * VD are all returned to the caller ('K' keeps this tensor, 'D' destroys it).
 * Returns the info code from gesdd (0 on success).
 * Fixes: the pool-size assert now compares bytes against bytes (matching
 * rightSVDD); resize() calls are no longer wrapped in assert(), which
 * compiled them away entirely under NDEBUG.
 */
template<typename DTTYPE> template<typename TSR, typename>
BLASINT BaseTensor<DTTYPE>::leftSVDD(const char &thistype, const uint32_t &index, const char &SVDtype, TSR &U, Vector<RDTTYPE> &lambda, TSR &VD, void* pool, const BLASINT &poolsize)
{
#ifdef DEBUG_LEFTSVDD_TIME
    clock_t time_begin = clock();
#endif
    assert(index < tensorRank);
    assert(thistype == 'K' || thistype == 'D');
    assert(SVDtype == 'A' || SVDtype == 'S');
    // (left, middle, right) factorization around leg "index".
    BLASINT diml = 1;
    for (BLASINT i = 0; i < index; i++) diml *= rankDim[i];
    BLASINT dimm = rankDim[index];
    BLASINT dimr = 1;
    for (BLASINT i = index+1; i < tensorRank; i++) dimr *= rankDim[i];
    const BLASINT dimlr = diml*dimr;
    assert(dimlr*dimm == dataSize);
    const BLASINT mindim = dimm<dimlr?dimm:dimlr;
    DTTYPE* psour;
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // poolsize is a byte count (see the workspace arithmetic below); the
    // original compared it against the element count dataSize.
    assert(poolsize >= dataSize*sizeof(DTTYPE));
    BLASINT extdtsize = 0;
    if (thistype == 'K')
    {
        // Keep mode: operate on a transposed scratch copy, leave *this intact.
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        psour = tmpdata;
        extdtsize = dataSize;
    }
    else
    {
        // Destroy mode: bring leg "index" to the front in place.
        shiftBefore(index, 0, tmpdata, dataSize*sizeof(DTTYPE));
        psour = tensorData;
    }
    BLASINT info;
    const bool lok = lambda.resize({mindim});
    assert(lok);
    (void)lok;
    lambda.fill(0.0);
    bool uok, vok;
    if (SVDtype == 'A')
    {
        // Full SVD: square U and VD.
        uok = U.resize({dimm, dimm});
        vok = VD.resize({dimlr, dimlr});
    }
    else
    {
        // Economy SVD: only mindim singular vectors.
        uok = U.resize({dimm, mindim});
        vok = VD.resize({mindim, dimlr});
    }
    assert(uok && vok);
    (void)uok;
    (void)vok;
    miolpk::miogesdd(SVDtype, dimm, dimlr, psour, dimlr, lambda.tensorData, U.tensorData, U.getRankDim(U.getTensorRank()-1), VD.tensorData, dimlr, info, tmpdata+extdtsize, poolsize-extdtsize*sizeof(DTTYPE));
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
#ifdef DEBUG_LEFTSVDD_TIME
    std::cout<<"Time on leftSVDD is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
    return info;
}
/*rightSVDD : SVD using gesdd (divide-and-conquer). Leg "index" becomes the
 * column dimension; U, lambda and VD are returned to the caller ('K' keeps
 * this tensor, 'D' destroys it). Returns the gesdd info code (0 on success).
 * Fix: resize() calls are no longer wrapped in assert(), which compiled them
 * away entirely under NDEBUG.
 */
template<typename DTTYPE> template<typename TSR, typename>
BLASINT BaseTensor<DTTYPE>::rightSVDD(const char &thistype, const uint32_t &index, const char &SVDtype, TSR &U, Vector<RDTTYPE> &lambda, TSR &VD, void* pool, const BLASINT &poolsize)
{
#ifdef DEBUG_RIGHTSVDD_TIME
    clock_t time_begin = clock();
#endif
    assert(index < tensorRank && tensorRank >= 2);
    assert(thistype == 'K' || thistype == 'D');
    assert(SVDtype == 'A' || SVDtype == 'S');
    // (left, middle, right) factorization around leg "index".
    BLASINT diml = 1;
    for (BLASINT i = 0; i < index; i++) diml *= rankDim[i];
    BLASINT dimm = rankDim[index];
    BLASINT dimr = 1;
    for (BLASINT i = index+1; i < tensorRank; i++) dimr *= rankDim[i];
    const BLASINT dimlr = diml*dimr;
    assert(dimlr*dimm == dataSize);
    const BLASINT mindim = dimm<dimlr?dimm:dimlr;
    DTTYPE* psour;
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    assert(poolsize >= dataSize*sizeof(DTTYPE)); // poolsize is in bytes
    BLASINT extdtsize = 0;
    if (thistype == 'K')
    {
        // Keep mode: operate on a transposed scratch copy, leave *this intact.
        llmopr::transposeMRto(diml, dimm, dimr, tensorData, tmpdata);
        psour = tmpdata;
        extdtsize = dataSize;
    }
    else
    {
        // Destroy mode: bring leg "index" to the back in place.
        shiftAfter(index, tensorRank-1, tmpdata, dataSize*sizeof(DTTYPE));
        psour = tensorData;
    }
    BLASINT info;
    const bool lok = lambda.resize({mindim});
    assert(lok);
    (void)lok;
    lambda.fill(0.0);
    bool uok, vok;
    if (SVDtype == 'A')
    {
        // Full SVD: square U and VD.
        uok = U.resize({dimlr, dimlr});
        vok = VD.resize({dimm, dimm});
    }
    else
    {
        // Economy SVD: only mindim singular vectors.
        uok = U.resize({dimlr, mindim});
        vok = VD.resize({mindim, dimm});
    }
    assert(uok && vok);
    (void)uok;
    (void)vok;
    miolpk::miogesdd(SVDtype, dimlr, dimm, psour, dimm, lambda.tensorData, U.tensorData, U.getRankDim(U.getTensorRank()-1), VD.tensorData, dimm, info, tmpdata+extdtsize, poolsize-extdtsize*sizeof(DTTYPE));
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
#ifdef DEBUG_RIGHTSVDD_TIME
    std::cout<<"Time on rightSVDD is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
    return info;
}
/*high order SVD, all U are unitary matrix, core tensor is stored locally and no truncation.
 * Pass 1: for each selected leg index[n], compute the full left-singular
 *         basis U[n] of that leg's unfolding (singular values appended,
 *         back to back, into lambdamax).
 * Pass 2: project the tensor onto each basis (U^H applied to the leg) to
 *         form the core tensor in place.
 */
template<typename DTTYPE>
void BaseTensor<DTTYPE>::highOrderLSVD(const uint32_t &num, const uint32_t* index, DTTYPE**U, const BLASINT* Usize, void* lambdamax, const BLASINT &lmbdsize, void* pool, const BLASINT &poolsize)
{
    RDTTYPE* startlambda = reinterpret_cast<RDTTYPE*>(lambdamax);
#ifdef CHECK_VECTOR_SIZE
    BLASINT lmbdsizesum = 0;
#endif
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // Pass 1: one SVD per selected leg.
    for (BLASINT n = 0; n < num; n++)
    {
        // Unfolding sizes around leg index[n].
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        const BLASINT dimlr = diml*dimr;
        assert(dimlr*dimm == dataSize);
        BLASINT mindim = dimm<dimlr?dimm:dimlr;
        /*B_jik <= A_ijk : move the target leg to the front of a scratch copy*/
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        BLASINT info;
        DTTYPE VD[16];// just a variable, not referenced,
#ifdef CHECK_VECTOR_SIZE
        assert(lmbdsizesum+dimm <= lmbdsize);
        assert(Usize[n] >= dimm*dimm);
        lmbdsizesum += dimm; // sum here
#endif
        memset(startlambda, 0, dimm*sizeof(RDTTYPE)); // dimtrlbd[n] singular values are kept, including those zero ones, it may be larger than mindim.
        // NOTE(review): the gesvd status "info" is not checked here — the
        // truncating BaseTensor overload asserts info == 0; confirm intent.
        miolpk::miogesvd('A', 'N', dimm, dimlr, tmpdata, dimlr, startlambda, U[n], dimm, VD, 2, info, tmpdata+dataSize, poolsize-dataSize*sizeof(DTTYPE));
        startlambda += dimm;
    }
    // Pass 2: apply U[n]^H to each leg to build the core tensor in place.
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        for (BLASINT l = 0; l < diml; l++)
        {
            DTTYPE* psour = tensorData + l*dimm*dimr;
            gemm('C', 'N', dimm, dimr, dimm, 1.0, U[n], dimm, psour, dimr, 0.0, tmpdata, dimr);
            memcpy(psour, tmpdata, dimm*dimr*sizeof(DTTYPE));
        }
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*High-order SVD (BaseTensor outputs, no truncation): for each selected leg,
 * compute its full left-singular basis U[n] (and optionally the singular
 * values into lambda[n]; pass lambda == nullptr to discard them), then
 * project the tensor onto every basis to form the core tensor in place.
 */
template<typename DTTYPE>
void BaseTensor<DTTYPE>::highOrderLSVD(const uint32_t &num, const uint32_t* index, BaseTensor**U, Vector<RDTTYPE>** lambda, void* pool, const BLASINT &poolsize)
{
    for (uint32_t i = 0; i < num; i++) assert(index[i] < tensorRank);
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // Pass 1: one SVD per selected leg.
    for (BLASINT n = 0; n < num; n++)
    {
        // Unfolding sizes around leg index[n].
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        U[n]->resize({dimm, dimm});
        const BLASINT dimlr = diml*dimr;
        assert(dimlr*dimm == dataSize);
        BLASINT mindim = dimm<dimlr?dimm:dimlr;
        /*B_jik <= A_ijk : move the target leg to the front of a scratch copy*/
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        BLASINT info;
        DTTYPE VD[16];// just a variable, not referenced,
        RDTTYPE* lmbdn;
        BLASINT extlmbdnsize;
        RDTTYPE* rtmpdata = reinterpret_cast<RDTTYPE*> (tmpdata+dataSize);
        if (lambda == nullptr)
        {
            // Caller does not want the singular values: park them in the pool.
            lmbdn = rtmpdata;
            extlmbdnsize = dimm;
        }
        else
        {
            lambda[n]->resize({dimm});
            lambda[n]->fill(0.0);
            lmbdn = lambda[n]->tensorData;
            extlmbdnsize = 0;
        }
        // NOTE(review): "info" is not checked here, unlike the truncating
        // overload below which asserts info == 0 — confirm intent.
        miolpk::miogesvd('A', 'N', dimm, dimlr, tmpdata, dimlr, lmbdn, U[n]->tensorData, dimm, VD, 2, info, rtmpdata+extlmbdnsize, poolsize-dataSize*sizeof(DTTYPE)-extlmbdnsize*sizeof(RDTTYPE));
    }
    // Pass 2: apply U[n]^H to each leg to build the core tensor in place.
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        for (BLASINT l = 0; l < diml; l++)
        {
            DTTYPE* psour = tensorData + l*dimm*dimr;
            gemm('C', 'N', dimm, dimr, dimm, 1.0, U[n]->tensorData, dimm, psour, dimr, 0.0, tmpdata, dimr);
            memcpy(psour, tmpdata, dimm*dimr*sizeof(DTTYPE));
        }
    }
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*high order SVD, all U are unitary matrix, core tensor is stored locally and truncations are performed according to dimtru[]
 * U ==> no truncation
 * core tensor ==> truncated
 * Pass 1 computes the full basis U[n] per selected leg; pass 2 keeps only
 * the first dimtru[n] rows of U[n]^H when projecting, shrinking each
 * selected leg of the core tensor to dimtru[n].
 */
template<typename DTTYPE>
void BaseTensor<DTTYPE>::highOrderLSVD(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, DTTYPE**U, const BLASINT* Usize, void* lambdamax, const BLASINT &lmbdsize, void* pool, const BLASINT &poolsize)
{
    RDTTYPE* startlambda = reinterpret_cast<RDTTYPE*>(lambdamax);
#ifdef CHECK_VECTOR_SIZE
    BLASINT lmbdsizesum = 0;
#endif
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // Pass 1: full SVD per selected leg (no truncation of U itself).
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        const BLASINT dimlr = diml*dimr;
        assert(dimlr*dimm == dataSize);
        const BLASINT mindim = dimm<dimlr?dimm:dimlr;
        /*B_jik <= A_ijk : move the target leg to the front of a scratch copy*/
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        BLASINT info;
        DTTYPE VD[16];// just a variable, not referenced,
#ifdef CHECK_VECTOR_SIZE
        assert(lmbdsizesum+dimm <= lmbdsize);
        assert(Usize[n] >= dimm*dimm); // no truncation in U, ==> U[dimm,dimm]
        lmbdsizesum += dimm; // sum here
#endif
        memset(startlambda, 0, dimm*sizeof(RDTTYPE));
        miolpk::miogesvd('A', 'N', dimm, dimlr, tmpdata, dimlr, startlambda, U[n], dimm, VD, 2, info, tmpdata+dataSize, poolsize-dataSize*sizeof(DTTYPE));
        startlambda += dimm;
    }
    // Pass 2: truncated projection — only the leading dimtru[n] rows of
    // U[n]^H are applied, so leg index[n] shrinks to dimtru[n].
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        assert(dimm >= dimtru[n]);
        for (BLASINT l = 0; l < diml; l++)
        {
            DTTYPE* psour = tensorData + l*dimm*dimr;
            DTTYPE* pdest = tmpdata + l*dimtru[n]*dimr;
            gemm('C', 'N', dimtru[n], dimr, dimm, 1.0, U[n], dimm, psour, dimr, 0.0, pdest, dimr);
        }
        memcpy(tensorData, tmpdata, diml*dimtru[n]*dimr*sizeof(DTTYPE));
        rankDim[index[n]] = dimtru[n];
    }
    // Recompute the element count after all truncations.
    dataSize = 1;
    for (BLASINT i = 0; i < tensorRank; i++) dataSize *= rankDim[i];
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*Truncating high-order SVD with BaseTensor outputs: each U[n] keeps its full
 * dimm x dimm basis, while the core tensor's leg index[n] is truncated to
 * dimtru[n]. Singular values are optionally returned via lambda (pass
 * nullptr to discard).
 */
template<typename DTTYPE>
void BaseTensor<DTTYPE>::highOrderLSVD(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, BaseTensor** U, Vector<RDTTYPE>** lambda, void* pool, const BLASINT &poolsize)
{
    for (uint32_t i = 0; i < num; i++) assert(index[i] < tensorRank);
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // Pass 1: full SVD per selected leg.
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        U[n]->resize({dimm, dimm});
        const BLASINT dimlr = diml*dimr;
        assert(dimlr*dimm == dataSize);
        const BLASINT mindim = dimm<dimlr?dimm:dimlr;
        /*B_jik <= A_ijk : move the target leg to the front of a scratch copy*/
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        BLASINT info;
        DTTYPE VD[16];// just a variable, not referenced,
        RDTTYPE* lmbdn;
        BLASINT extlmbdnsize;
        RDTTYPE* rtmpdata = reinterpret_cast<RDTTYPE*> (tmpdata+dataSize);
        if (lambda == nullptr)
        {
            // Caller does not want the singular values: park them in the pool.
            lmbdn = rtmpdata;
            extlmbdnsize = dimm;
        }
        else
        {
            lambda[n]->resize({dimm});
            lambda[n]->fill(0.0);
            lmbdn = lambda[n]->tensorData;
            extlmbdnsize = 0;
        }
        miolpk::miogesvd('A', 'N', dimm, dimlr, tmpdata, dimlr, lmbdn, U[n]->tensorData, dimm, VD, 2, info, rtmpdata+extlmbdnsize, poolsize-dataSize*sizeof(DTTYPE)-extlmbdnsize*sizeof(RDTTYPE));
        assert(info == 0);
    }
    // Pass 2: truncated projection — only the leading dimtru[n] rows of
    // U[n]^H are applied, so leg index[n] shrinks to dimtru[n].
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        assert(dimm >= dimtru[n]);
        for (BLASINT l = 0; l < diml; l++)
        {
            DTTYPE* psour = tensorData + l*dimm*dimr;
            DTTYPE* pdest = tmpdata + l*dimtru[n]*dimr;
            gemm('C', 'N', dimtru[n], dimr, dimm, 1.0, U[n]->tensorData, dimm, psour, dimr, 0.0, pdest, dimr);
        }
        memcpy(tensorData, tmpdata, diml*dimtru[n]*dimr*sizeof(DTTYPE));
        rankDim[index[n]] = dimtru[n];
    }
    // Recompute the element count after all truncations.
    dataSize = 1;
    for (BLASINT i = 0; i < tensorRank; i++) dataSize *= rankDim[i];
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
/*Truncating high-order SVD using the expert driver gesvdx: only the leading
 * dimtru[n] singular vectors of each selected leg are computed (cheaper than
 * a full SVD), each U[n] is stored truncated as dimm x dimtru[n], and
 * dimtrlbd[n] singular values per leg are written into lambdamax.
 */
template<typename DTTYPE>
void BaseTensor<DTTYPE>::highOrderLSVDx(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, DTTYPE**U, const BLASINT* Usize, void* lambdamax, const BLASINT &lmbdsize, const BLASINT* dimtrlbd, void* pool, const BLASINT &poolsize)
{
    RDTTYPE* startlambda = reinterpret_cast<RDTTYPE*>(lambdamax);
#ifdef CHECK_VECTOR_SIZE
    BLASINT lmbdsizesum = 0;
#endif
    DTTYPE* tmpdata = reinterpret_cast<DTTYPE*>(pool);
    // Pass 1: partial SVD (singular values 1..dimtru[n]) per selected leg.
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        const BLASINT dimm = rankDim[index[n]];
        const BLASINT dimlr = diml*dimr;
        assert(dimm*dimlr == dataSize);
        const BLASINT mindim = dimm<dimlr?dimm:dimlr;
        /*B_jik <= A_ijk : move the target leg to the front of a scratch copy*/
        llmopr::transposeLMto(diml, dimm, dimr, tensorData, tmpdata);
        BLASINT info;
        DTTYPE VD[16];// just a variable, not referenced,
        RDTTYPE vl, vu; // value-range bounds; unused with range selector 'I'
        BLASINT ns;     // number of singular values actually found
#ifdef CHECK_VECTOR_SIZE
        assert(lmbdsizesum+mindim <= lmbdsize && lmbdsizesum+dimtrlbd[n] <= lmbdsize);
        assert(Usize[n] >= dimm*dimtru[n]); // U is stored truncated: U[dimm,dimtru[n]]
        lmbdsizesum += dimtrlbd[n]; // sum here
#endif
        memset(startlambda, 0, dimtrlbd[n]*sizeof(RDTTYPE)); // only dimtrlbd[n] singular values are kept, it can be larger than dimtru[n]
        miolpk::miogesvdx('V', 'N', 'I', dimm, dimlr, tmpdata, dimlr, vl, vu, 1, dimtru[n], ns, startlambda, U[n], dimtru[n], VD, 2, info, tmpdata+dataSize, poolsize-dataSize*sizeof(DTTYPE));
        assert(info == 0 && ns == dimtru[n]);
        startlambda += dimtrlbd[n];
    }
    // Pass 2: project each leg onto its truncated basis, shrinking the leg
    // to dimtru[n].
    for (BLASINT n = 0; n < num; n++)
    {
        BLASINT diml = 1;
        for (BLASINT i = 0; i < index[n]; i++) diml *= rankDim[i];
        BLASINT dimr = 1;
        for (BLASINT i = index[n]+1; i < tensorRank; i++) dimr *= rankDim[i];
        BLASINT dimm = rankDim[index[n]];
        assert(dimm >= dimtru[n]);
        for (BLASINT l = 0; l < diml; l++)
        {
            DTTYPE* psour = tensorData + l*dimm*dimr;
            DTTYPE* pdest = tmpdata + l*dimtru[n]*dimr;
            gemm('C', 'N', dimtru[n], dimr, dimm, 1.0, U[n], dimtru[n], psour, dimr, 0.0, pdest, dimr);
        }
        memcpy(tensorData, tmpdata, diml*dimtru[n]*dimr*sizeof(DTTYPE));
        rankDim[index[n]] = dimtru[n];
    }
    // Recompute the element count after all truncations.
    dataSize = 1;
    for (BLASINT i = 0; i < tensorRank; i++) dataSize *= rankDim[i];
    setFileSize();
#ifdef CHECK_BASETENSOR
    checkBaseTensor();
#endif
}
template<typename DTTYPE>
void BaseTensor<DTTYPE>::highOrderLSVDx(const uint32_t &num, const uint32_t* index, const BLASINT* dimtru, BaseTensor** U, Vector<RDTTYPE>** lambda, void* pool, const BLASINT &poolsize)
{
    // Not implemented: this overload aborts unconditionally if ever called.
    assert(0);
}
template<typename DTTYPE> template<typename RDT, typename> auto BaseTensor<DTTYPE>::normalize(const BLASINT &ntype, const RDT &nr)
{
    /// Rescale the tensor so its norm becomes "nr" and return the old norm.
    /// ntype == 0 : 2-norm; ntype == 1 : maximum absolute element.
    if (ntype == 0)
    {
        auto old2 = nrm2(dataSize, tensorData, 1);
        DTTYPE factor = nr/old2;
        scal(dataSize, factor, tensorData, 1);
        return old2;
    }
    if (ntype == 1) // +nrm or -nrm for the maximum absolute one
    {
        auto peak = dabs(tensorData[0]);
        for (BLASINT k = 1; k < dataSize; k++)
        {
            const auto mag = dabs(tensorData[k]);
            if (mag > peak) peak = mag;
        }
        DTTYPE factor = nr/peak;
        scal(dataSize, factor, tensorData, 1);
        return peak;
    }
    assert(0); // unsupported norm type
    abort();
}
template<typename DTTYPE> template<typename RDT, typename> auto BaseTensor<DTTYPE>::norm(const BLASINT &ntype)
{
    /// Return the tensor norm without modifying the data.
    /// ntype == 0 : 2-norm; ntype == 1 : maximum absolute element.
    if (ntype == 0)
    {
        return nrm2(dataSize, tensorData, 1);
    }
    if (ntype == 1) // +nrm or -nrm for the maximum absolute one
    {
        auto peak = dabs(tensorData[0]);
        for (BLASINT k = 1; k < dataSize; k++)
        {
            const auto mag = dabs(tensorData[k]);
            if (mag > peak) peak = mag;
        }
        return peak;
    }
    assert(0); // unsupported norm type
    abort();
}
template<typename DTTYPE>
void BaseTensor<DTTYPE>::cconj() noexcept
{
    // Complex-conjugate every entry in place.  When DTTYPE coincides with
    // its real companion type RDTTYPE the data is purely real, so there is
    // nothing to do and the loop is skipped entirely.
    const bool hasImagPart = (typeid(RDTTYPE) != typeid(DTTYPE));
    if (!hasImagPart) return;
    for (BLASINT k = 0; k < dataSize; k++) tensorData[k] = conj(tensorData[k]);
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::saveTensorToFile(const char* filename) const
{
    // Open the target file (truncating any previous content) and delegate
    // the actual binary serialization to saveTensorToStream().
    std::ofstream ofs;
    ofs.open(filename, ios::trunc | ios::out);
    if (!ofs) return false;
    const bool ok = saveTensorToStream(ofs);
    ofs.close();
    return ok;
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::saveTensorToStream(std::ofstream &ofs) const
{
    /*
     * Serialize this tensor in the binary layout consumed by
     * readTensorFromStream()/readTensorFromMemory():
     *   fileSize (long) | tensorRank (uint32_t) |
     *   rankDim[0..tensorRank) (one BLASINT each) |
     *   dataSize (BLASINT) | tensorData (dataSize * sizeof(DTTYPE) bytes)
     * Returns false when the stream entered a failed state, i.e. any write
     * did not complete (previously this function returned true
     * unconditionally, hiding I/O errors from callers such as
     * saveTensorToFile()).
     */
    ofs.write((const char*)&fileSize, sizeof(long));
    ofs.write((const char*)&tensorRank, sizeof(uint32_t));
    // uint32_t index for consistency with readTensorFromStream() and to
    // avoid a signed/unsigned comparison against tensorRank.
    for (uint32_t i = 0; i < tensorRank; i++) ofs.write((const char*)&rankDim[i], sizeof(BLASINT));
    ofs.write((const char*)&dataSize, sizeof(BLASINT));
    ofs.write((const char*)tensorData, dataSize*sizeof(DTTYPE));
    return static_cast<bool>(ofs);
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::readTensorFromStream(ifstream &ifs)
{
/*
 * Deserialize a tensor written by saveTensorToStream(): header
 * (fileSize, tensorRank, rankDim[], dataSize) followed by the raw payload.
 * Allocates a fresh tensorData buffer of dataSize elements.
 * NOTE(review): the previous tensorData pointer is overwritten without
 * being freed — this leaks if the tensor already owned data; confirm this
 * is only called on freshly constructed tensors.
 * NOTE(review): read errors are never checked; the function returns true
 * even for a truncated or corrupt stream.
 */
ifs.read((char*)&fileSize, sizeof(long));
ifs.read((char*)&tensorRank, sizeof(uint32_t));
// assumes tensorRank fits the fixed rankDim capacity (MAXTRK) — TODO confirm
for (uint32_t i = 0; i < tensorRank; i++) ifs.read((char*)&rankDim[i], sizeof(BLASINT));
ifs.read((char*)&dataSize, sizeof(BLASINT));
tensorData = new(std::nothrow) DTTYPE[dataSize];
assert(tensorData);
ifs.read((char*)tensorData, dataSize*sizeof(DTTYPE));
return true;
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::readTensorFromMemory(char* buffer)
{
/*
 * Same layout as readTensorFromStream(), but parsed from an in-memory
 * buffer: fileSize | tensorRank | rankDim[] | dataSize | raw payload.
 * NOTE(review): the header fields are read by casting char* to long* /
 * uint32_t* / BLASINT* and dereferencing — undefined behavior on
 * strict-alignment targets unless the buffer is suitably aligned;
 * memcpy into the fields would be portable.
 * NOTE(review): as in readTensorFromStream(), a previously owned
 * tensorData buffer is overwritten without being freed.
 */
char* nextbuffer = buffer;
fileSize = *((long*)nextbuffer);
nextbuffer += sizeof(long);
tensorRank = *((uint32_t*)nextbuffer);
nextbuffer += sizeof(uint32_t);
// signed BLASINT index compared against uint32_t tensorRank — mirrors the
// writer; assumes tensorRank fits in BLASINT.
for (BLASINT i = 0; i < tensorRank; i++)
{
rankDim[i] = *((BLASINT*)nextbuffer);
nextbuffer += sizeof(BLASINT);
}
dataSize = *((BLASINT*)nextbuffer);
nextbuffer += sizeof(BLASINT);
tensorData = new(std::nothrow) DTTYPE[dataSize];
assert(tensorData);
memcpy((char*)tensorData, nextbuffer, dataSize*sizeof(DTTYPE));
nextbuffer += dataSize*sizeof(DTTYPE);
return true;
}
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::printTensorToFile(const char* suffixname, const streamsize &width, const char &type)
{
    /*
     * Dump the tensor as two text files: "<suffix>R.dat" holds the real
     * parts and "<suffix>I.dat" the imaginary parts of every entry.
     *   type == 'A' : first write tensorRank and rankDim[] on a header line,
     *                 then the data;
     *   type == 'D' : write the data only.
     * Returns false if either file cannot be opened.
     */
    assert(type == 'A' || type == 'D');
    char filename[120];
    std::ofstream ofs;
    /* real parts */
    stringname(110, filename, suffixname, "R.dat");
    ofs.open(filename, ios::trunc | ios::out);
    if (!ofs) return false;
    ofs.precision(width);
    ofs.width(width);
    if (type == 'A')
    {
        ofs << tensorRank <<" ";
        for (uint32_t k = 0; k < tensorRank; k++) ofs << rankDim[k] <<" ";
        ofs << std::endl;
    }
    for (BLASINT k = 0; k < dataSize; k++) ofs << real(tensorData[k]) << " ";
    ofs.close();
    /* imaginary parts */
    stringname(110, filename, suffixname, "I.dat");
    ofs.open(filename, ios::trunc | ios::out);
    if (!ofs) return false;
    ofs.precision(width);
    ofs.width(width);
    if (type == 'A')
    {
        ofs << tensorRank <<" ";
        for (uint32_t k = 0; k < tensorRank; k++) ofs << rankDim[k] << " ";
        ofs << std::endl;
    }
    for (BLASINT k = 0; k < dataSize; k++) ofs << imag(tensorData[k]) << " ";
    ofs.close();
    return true;
}
template<typename DTTYPE>
void BaseTensor<DTTYPE>::checkBaseTensor() const
{
    // Sanity check of the tensor bookkeeping: the rank must lie in
    // (0, MAXTRK], every dimension must be positive, and the product of
    // all dimensions must equal the stored element count.
    assert(tensorRank > 0 && tensorRank <= MAXTRK);
    BLASINT product = 1;
    for (uint32_t k = 0; k < tensorRank; k++)
    {
        assert(rankDim[k] > 0);
        product *= rankDim[k];
    }
    assert(product == dataSize);
}
/*private functions*/
template<typename DTTYPE>
void BaseTensor<DTTYPE>::setFileSize() noexcept
{
    // Number of bytes saveTensorToStream() will emit: the header
    // (fileSize + tensorRank + rankDim[] + dataSize) plus the raw payload.
    fileSize = sizeof(long) + sizeof(uint32_t) + (tensorRank+1)*sizeof(BLASINT)
             + dataSize*sizeof(DTTYPE);
}
/*shift index "il" before "iprev", require : il > iprev*/
/* i.e. move index il so it sits directly in front of index iprev */
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftBefore_(const uint32_t &il, const uint32_t &iprev, void* tmpdata, const BLASINT &tdsize)
{
/*
 * In-place index permutation: move tensor index "il" so it sits directly
 * in front of index "iprev" (requires iprev < il); the indices in between
 * shift one slot to the right.  tmpdata/tdsize is caller-owned scratch:
 * a whole-tensor buffer enables the parallel path, a one-slice buffer
 * falls back to a serial pass, anything smaller fails (returns false
 * after assert(0)).
 */
#ifdef DEBUG_SHIFT
std::cout<<" ==start shiftBefore iitt :: " << std::endl;
clock_t time_begin = clock();
#endif
assert(iprev < il);
/* Factor the flat data as [Dl][Dc][Dm][Dr]:
 * Dl = dims left of iprev, Dc = dims [iprev, il), Dm = dim il,
 * Dr = remaining dims right of il.  The move is then a (Dc x Dm)
 * transpose inside each of the Dl outer slices. */
BLASINT Dl = 1;
for (unsigned i = 0; i < iprev; i++) Dl *= rankDim[i];
BLASINT Dc = 1;
for (unsigned i = iprev; i < il; i++) Dc *= rankDim[i];
BLASINT Dm = rankDim[il];
BLASINT Dr = ((dataSize/Dl)/Dc)/Dm;
BLASINT dcmr = Dc*Dm*Dr;
DTTYPE* localtmpdata = reinterpret_cast<DTTYPE*> (tmpdata);
if (tdsize >= dataSize*sizeof(DTTYPE))
{
/* scratch holds the whole tensor: copy each slice out and transpose back, in parallel */
if (Dr == 1)
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dcmr;
DTTYPE* psour = localtmpdata + l*dcmr;
copy(dcmr, pdest, 1, psour, 1);
llmopr::fastTransposeTo(Dc, Dm, psour, Dm, pdest, Dc);
}
}
else
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dcmr;
DTTYPE* psour = localtmpdata + l*dcmr;
copy(dcmr, pdest, 1, psour, 1);
llmopr::transposeLMto(Dc, Dm, Dr, psour, pdest);
}
}
}
else if (tdsize >= dcmr*sizeof(DTTYPE))
{
/* scratch holds only one slice: serial slice-by-slice fallback */
if (Dr == 1)
{
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dcmr;
memcpy(localtmpdata, pdest, dcmr*sizeof(DTTYPE));
llmopr::fastTransposeTo(Dc, Dm, localtmpdata, Dm, pdest, Dc);
}
}
else
{
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dcmr;
memcpy(localtmpdata, pdest, dcmr*sizeof(DTTYPE));
llmopr::transposeLMto(Dc, Dm, Dr, localtmpdata, pdest);
}
}
}
else
{
assert(0); // not finished yet!
return false;
}
/* update bookkeeping: dims [iprev, il) shift right, slot iprev receives Dm */
for (uint32_t i = il; i > iprev; i--) rankDim[i] = rankDim[i-1];
rankDim[iprev] = Dm;
#ifdef CHECK_BASETENSOR
checkBaseTensor();
#endif
#ifdef DEBUG_SHIFT
std::cout<<" ==Time on shiftBefore(iitt) is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
return true;
}
/*shift index "il" before "iprev", require : il > iprev*/
/* i.e. move index il so it sits directly in front of index iprev */
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftBefore_(const uint32_t &il, const uint32_t &iprev, BaseTensor<DTTYPE> &bt) const
{
/*
 * Out-of-place variant of shiftBefore_: write the permuted tensor into bt
 * (resized to this tensor's shape first) and leave *this untouched.
 * Requires iprev < il.  Always returns true.
 */
#ifdef DEBUG_SHIFT
std::cout<<" ==start shiftBefore iib :: " << std::endl;
clock_t time_begin = clock();
#endif
assert(iprev < il);
bt.resize(tensorRank, rankDim);
/* same [Dl][Dc][Dm][Dr] factorization as the in-place overload */
BLASINT Dl = 1;
for (unsigned i = 0; i < iprev; i++) Dl *= rankDim[i];
BLASINT Dc = 1;
for (unsigned i = iprev; i < il; i++) Dc *= rankDim[i];
BLASINT Dm = rankDim[il];
BLASINT Dr = ((dataSize/Dl)/Dc)/Dm;
BLASINT dcmr = Dc*Dm*Dr;
if (Dr == 1)
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* psour = tensorData + l*dcmr;
DTTYPE* pdest = bt.tensorData + l*dcmr;
llmopr::fastTransposeTo(Dc, Dm, psour, Dm, pdest, Dc);
}
}
else
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* psour = tensorData + l*dcmr;
DTTYPE* pdest = bt.tensorData + l*dcmr;
llmopr::transposeLMto(Dc, Dm, Dr, psour, pdest);
}
}
for (uint32_t i = il; i > iprev; i--) bt.rankDim[i] = bt.rankDim[i-1];
bt.rankDim[iprev] = Dm;
#ifdef CHECK_BASETENSOR
// NOTE(review): this validates *this, not the freshly built bt —
// bt.checkBaseTensor() looks like the intent; confirm.
checkBaseTensor();
#endif
#ifdef DEBUG_SHIFT
std::cout<<" ==Time on shiftBefore(iib) is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
return true;
}
/*shift index "il" after "iback", require il < iback*/
/* i.e. move index il so it sits directly behind index iback */
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftAfter_(const uint32_t &il, const uint32_t &iback, void* tmpdata, const BLASINT &tdsize)
{
/*
 * In-place index permutation, mirror image of shiftBefore_: move tensor
 * index "il" so it sits directly behind index "iback" (requires
 * il < iback); indices in between shift one slot left.  tmpdata/tdsize
 * is caller-owned scratch: whole-tensor size enables the parallel path,
 * one-slice size falls back to a serial pass, anything smaller fails
 * (returns false after assert(0)).
 */
#ifdef DEBUG_SHIFT
std::cout<<" ==start shiftAfter iitt :: " << std::endl;
clock_t time_begin = clock();
#endif
assert(il < iback);
/* Factor the flat data as [Dl][Dm][Dc][Dr]:
 * Dl = dims left of il, Dm = dim il, Dc = dims (il, iback],
 * Dr = remaining dims; the move is a (Dm x Dc) transpose per slice. */
BLASINT Dl = 1;
for (BLASINT l = 0; l < il; l++) Dl *= rankDim[l];
BLASINT Dm = rankDim[il];
BLASINT Dc = 1;
for (BLASINT c = il+1; c <= iback; c++) Dc *= rankDim[c];
BLASINT Dr = ((dataSize/Dl)/Dm)/Dc;
DTTYPE* localtmpdata = reinterpret_cast<DTTYPE*> (tmpdata);
BLASINT dmcr = Dm*Dc*Dr;
if (tdsize >= Dl*dmcr*sizeof(DTTYPE))
{
/* scratch holds the whole tensor: fully parallel transpose */
if (Dr == 1)
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++) llmopr::fastTransposeTo(Dm, Dc, tensorData+l*dmcr, Dc, localtmpdata+l*dmcr, Dm);
copy(getDataSize(), localtmpdata, 1, tensorData, 1);
}
else
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dmcr;
DTTYPE* psour = localtmpdata + l*dmcr;
copy(dmcr, pdest, 1, psour, 1);
llmopr::transposeLMto(Dm, Dc, Dr, psour, pdest);
}
}
}
else if (tdsize >= dmcr*sizeof(DTTYPE))
{
/* scratch holds only one slice: serial slice-by-slice fallback */
if (Dr == 1)
{
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dmcr;
memcpy(localtmpdata, pdest, dmcr*sizeof(DTTYPE));
llmopr::fastTransposeTo(Dm, Dc, localtmpdata, Dc, pdest, Dm);
}
}
else
{
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* pdest = tensorData + l*dmcr;
memcpy(localtmpdata, pdest, dmcr*sizeof(DTTYPE));
llmopr::transposeLMto(Dm, Dc, Dr, localtmpdata, pdest);
}
}
}
else
{
assert(0);
return false;
}
/* update bookkeeping: dims (il, iback] shift left, slot iback receives Dm */
for (uint32_t i = il; i < iback; i++) rankDim[i] = rankDim[i+1];
rankDim[iback] = Dm;
#ifdef CHECK_BASETENSOR
checkBaseTensor();
#endif
#ifdef DEBUG_SHIFT
std::cout<<" ==Time on shiftAfter(iitt) is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
return true;
}
/*shift index "il" after "iback", require il < iback*/
/* i.e. move index il so it sits directly behind index iback */
template<typename DTTYPE>
bool BaseTensor<DTTYPE>::shiftAfter_(const uint32_t &il, const uint32_t &iback, BaseTensor<DTTYPE> &bt) const
{
/*
 * Out-of-place variant of shiftAfter_: write the permuted tensor into bt
 * (resized to this tensor's shape first) and leave *this untouched.
 * Requires il < iback.  Always returns true.
 */
#ifdef DEBUG_SHIFT
std::cout<<" ==start shiftAfter iib :: " << std::endl;
clock_t time_begin = clock();
#endif
assert(il < iback);
bt.resize(tensorRank, rankDim);
/* same [Dl][Dm][Dc][Dr] factorization as the in-place overload */
BLASINT Dl = 1;
for (BLASINT l = 0; l < il; l++) Dl *= rankDim[l];
BLASINT Dm = rankDim[il];
BLASINT Dc = 1;
for (BLASINT c = il+1; c <= iback; c++) Dc *= rankDim[c];
BLASINT Dr = ((dataSize/Dl)/Dm)/Dc;
BLASINT dmcr = Dm*Dc*Dr;
if (Dr == 1)
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++) llmopr::fastTransposeTo(Dm, Dc, tensorData+l*dmcr, Dc, bt.tensorData+l*dmcr, Dm);
}
else
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dl; l++)
{
DTTYPE* psour = tensorData + l*dmcr;
DTTYPE* pdest = bt.tensorData + l*dmcr;
llmopr::transposeLMto(Dm, Dc, Dr, psour, pdest);
}
}
for (uint32_t i = il; i < iback; i++) bt.rankDim[i] = bt.rankDim[i+1];
bt.rankDim[iback] = Dm;
#ifdef CHECK_BASETENSOR
// NOTE(review): this validates *this, not the freshly built bt —
// bt.checkBaseTensor() looks like the intent; confirm.
checkBaseTensor();
#endif
#ifdef DEBUG_SHIFT
std::cout<<" ==Time on shiftAfter(iib) is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl;
#endif
return true;
}
/*
 * Contract btl (over its index il) with btr (over its index ir) into bc,
 * i.e. sum over the shared dimension Dlm == Drm.  cttype ('S', 'D' or 'I')
 * selects the ordering/layout of the result indices, as encoded by
 * bc.setTensorStruct(); see the three branches below.
 * tmpdata/tdsize is caller-owned scratch used to transpose operands into
 * gemm-friendly layouts; larger buffers enable the parallel paths.
 * NOTE(review): although btl and btr are taken by const reference, their
 * tensorData payloads are transposed IN PLACE through the pointer member
 * (and restored afterwards in the 'S' branch) — this is not safe with
 * concurrent readers of btl/btr; confirm callers never share them.
 */
template<typename DT> void tensorContraction(const char &cttype, const BaseTensor<DT> &btl, const uint32_t &il, const BaseTensor<DT> &btr, const uint32_t &ir, BaseTensor<DT> &bc, void* tmpdata, const BLASINT &tdsize)
{
assert(&btl != &btr);
#ifdef DEBUG_BASETENSOR_TIME_cbibitt
std::cout<<std::endl<<"***start cbibitt :: " <<std::endl;
clock_t time_begin = clock();
#endif
bc.setTensorStruct(btl, il, btr, ir, cttype);
if (bc.getMaxDataSize() < bc.getDataSize()) bc.reSetMaxDataSize(bc.getDataSize());
/*left dim*/
/* btl factored as [Dll][Dlm][Dlr] around its contracted index il */
BLASINT Dll = 1;
for (uint32_t i = 0; i < il; i++) Dll *= btl.getRankDim(i);
BLASINT Dlm = btl.getRankDim(il);
BLASINT Dlr = (btl.getDataSize()/Dll)/Dlm;
/*right dim*/
/* btr factored as [Drl][Drm][Drr] around its contracted index ir */
BLASINT Drl = 1;
for (uint32_t i = 0; i < ir; i++) Drl *= btr.getRankDim(i);
BLASINT Drm = btr.getRankDim(ir);
BLASINT Drr = (btr.getDataSize()/Drl)/Drm;
assert(bc.getDataSize() == Dll*Dlr*Drl*Drr);
assert(Dlm == Drm);
DT* localtmpdata = reinterpret_cast<DT*>(tmpdata);
/*contraction*/
if (cttype == 'S')
{
/* Single big gemm: bring the contracted dimension of each operand to a
 * leading/trailing position (transposing in place when il/ir is an
 * interior index), multiply, then restore the operands below. */
char transl;
char transr = 'T';
BLASINT ldl;
BLASINT ldr;
if (il == 0)
{
transl = 'T';
ldl = Dll*Dlr;
}
else if (il+1 == btl.getTensorRank())
{
transl = 'N';
ldl = Dlm;
}
else
{
/* interior index: transpose each [Dlm][Dlr] slice of btl in place */
BLASINT dlmr = Dlm*Dlr;
if (tdsize >= btl.getDataSize()*sizeof(DT))
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dll; l++) llmopr::fastTransposeTo(Dlm, Dlr, btl.tensorData+l*dlmr, Dlr, localtmpdata+l*dlmr, Dlm);
copy(btl.getDataSize(), localtmpdata, 1, btl.tensorData, 1);
}
else if (tdsize >= dlmr*sizeof(DT))
{
for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlm, Dlr, btl.tensorData+l*dlmr, localtmpdata);
}
else
{
std::cout << "warning : tmpdata is too small, tdsize is " << tdsize << ", it should be at leat " << dlmr*sizeof(DT) <<std::endl;
for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlm, Dlr, btl.tensorData+l*dlmr);
}
transl = 'N';
ldl = Dlm;
}
const DT* psour = btr.tensorData;
if (ir == 0)
{
transr = 'N';
ldr = Drl*Drr;
}
else if (ir+1 == btr.getTensorRank())
{
transr = 'T';
ldr = Drm;
}
else
{
/* interior index: transpose each [Drm][Drr] slice of btr; when the
 * scratch holds all of btr the result lands there and btr itself is
 * left untouched (psour then points at the scratch copy) */
BLASINT drmr = Drm*Drr;
if (tdsize >= btr.getDataSize()*sizeof(DT))
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Drl; l++) llmopr::fastTransposeTo(Drm, Drr, btr.tensorData+l*drmr, Drr, localtmpdata+l*drmr, Drm);
psour = localtmpdata;
}
else if (tdsize >= drmr*sizeof(DT))
{
for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drm, Drr, btr.tensorData+l*drmr, localtmpdata);
}
else
{
std::cout << "warning : tmpdata is too small, tdsize is " << tdsize << ", it should be at least "<<drmr*sizeof(DT)<<std::endl;
for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drm, Drr, btr.tensorData+l*drmr);
}
transr = 'T';
ldr = Drm;
}
gemm(transl, transr, Dll*Dlr, Drl*Drr, Dlm, 1.0, btl.tensorData, ldl, psour, ldr, 0.0, bc.tensorData, Drl*Drr);
/*restore the data in btr*/
/* only needed when btr was transposed in place (interior ir, small scratch) */
if (ir != 0 && ir+1 != btr.getTensorRank() && tdsize < btr.getDataSize()*sizeof(DT))
{
BLASINT drmr = Drm*Drr;
if (tdsize >= drmr*sizeof(DT))
{
for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drr, Drm, btr.tensorData+l*drmr, localtmpdata);
}
else
{
for (BLASINT l = 0; l < Drl; l++) llmopr::transposeOnsite(Drr, Drm, btr.tensorData+l*drmr);
}
}
/*restore the data in btl*/
if (il != 0 && il+1 != btl.getTensorRank())
{
BLASINT dlmr = Dlm*Dlr;
if (tdsize >= btl.getDataSize()*sizeof(DT))
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dll; l++) llmopr::fastTransposeTo(Dlr, Dlm, btl.tensorData+l*dlmr, Dlm, localtmpdata+l*dlmr, Dlr);
copy(btl.getDataSize(), localtmpdata, 1, btl.tensorData, 1);
}
else if (tdsize >= dlmr*sizeof(DT))
{
for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlr, Dlm, btl.tensorData+l*dlmr, localtmpdata);
}
else
{
for (BLASINT l = 0; l < Dll; l++) llmopr::transposeOnsite(Dlr, Dlm, btl.tensorData+l*dlmr);
}
}
}
else if (cttype == 'D')
{
/* Direct layout: dispatch on which outer dimensions are trivial so the
 * cheapest gemm shape (or a batched per-slice gemm) can be used; the
 * operands are never modified in this branch. */
if (Drl == 1 && Dll == 1)
{
gemm('T', 'N', Dlr, Drr, Dlm, 1.0, btl.tensorData, Dlr, btr.tensorData, Drr, 0.0, bc.tensorData, Drr);
}
else if (Drl == 1 && Dlr == 1)
{
gemm('N', 'N', Dll, Drr, Dlm, 1.0, btl.tensorData, Dlm, btr.tensorData, Drr, 0.0, bc.tensorData, Drr);
}
else if (Drl == 1 && tdsize >= btl.getDataSize()*sizeof(DT))
{
llmopr::transposeMRto(Dll, Dlm, Dlr, btl.tensorData, localtmpdata);
gemm('N', 'N', Dll*Dlr, Drr, Dlm, 1.0, localtmpdata, Dlm, btr.tensorData, Drr, 0.0, bc.tensorData, Drr);
}
else if (Dlr == 1 && Drr == 1)
{
gemm('N', 'T', Dll, Drl, Dlm, 1.0, btl.tensorData, Dlm, btr.tensorData, Drm, 0.0, bc.tensorData, Drl);
}
else if (Dlr == 1 && tdsize >= btr.getDataSize()*sizeof(DT))
{
llmopr::transposeLMto(Drl, Drm, Drr, btr.tensorData, localtmpdata);
gemm('N', 'N', Dll, Drl*Drr, Dlm, 1.0, btl.tensorData, Dlm, localtmpdata, Drl*Drr, 0.0, bc.tensorData, Drl*Drr);
}
else if (Dll == 1 && Drr == 1)
{
gemm('N', 'N', Drl, Dlr, Dlm, 1.0, btl.tensorData, Dlm, btr.tensorData, Drm, 0.0, bc.tensorData, Dlr);
}
else if (Drr == 1)
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT l = 0; l < Dll; l++)
{
const DT* ltdata = btl.tensorData+l*Dlm*Dlr;
DT* localdata = bc.tensorData+l*Drl*Drr;
gemm('N', 'N', Drl, Drr, Dlm, 1.0, ltdata, Dlr, btr.tensorData, Drm, 0.0, localdata, Drr);
}
}
else
{
/* general case: one gemm per (left-slice, right-slice) pair */
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT lr = 0; lr < Dll*Drl; lr++)
{
const BLASINT l = lr/Drl;
const BLASINT r = lr%Drl;
const DT* ltdata = btl.tensorData+l*Dlm*Dlr;
const DT* rtdata = btr.tensorData+r*Drm*Drr;
DT* localdata = bc.tensorData+(l*Drl+r)*Dlr*Drr;
gemm('T', 'N', Dlr, Drr, Dlm, 1.0, ltdata, Dlr, rtdata, Drr, 0.0, localdata, Drr);
}
}
}
else // (cttype == 'I')
{
/* Interchanged layout (right free indices before left ones); the OpenMP
 * path is only worth the thread overhead for enough slice pairs. */
if (Dll*Drl > 10)
{
#pragma omp parallel for schedule(dynamic, 1)
for (BLASINT lr = 0; lr < Dll*Drl; lr++)
{
const BLASINT l = lr/Drl;
const BLASINT r = lr%Drl;
const DT* ltdata = btl.tensorData+l*Dlm*Dlr;
const DT* rtdata = btr.tensorData+r*Drm*Drr;
DT* localdata = bc.tensorData+(l*Drl+r)*Drr*Dlr;
gemm('T', 'N', Drr, Dlr, Dlm, 1.0, rtdata, Drr, ltdata, Dlr, 0.0, localdata, Dlr);
}
}
else
{
for (BLASINT lr = 0; lr < Dll*Drl; lr++)
{
const BLASINT l = lr/Drl;
const BLASINT r = lr%Drl;
const DT* ltdata = btl.tensorData+l*Dlm*Dlr;
const DT* rtdata = btr.tensorData+r*Drm*Drr;
DT* localdata = bc.tensorData+(l*Drl+r)*Drr*Dlr;
gemm('T', 'N', Drr, Dlr, Dlm, 1.0, rtdata, Drr, ltdata, Dlr, 0.0, localdata, Dlr);
}
}
}
bc.setFileSize();
#ifdef CHECK_BASETENSOR
bc.checkBaseTensor();
#endif
#ifdef DEBUG_BASETENSOR_TIME_cbibitt
std::cout<<"***Time on BaseTensor(cbibitt) is " << double(clock()-time_begin)/CLOCKS_PER_SEC << std::endl<<std::endl;
#endif
}
#endif
|
normal_gap_process.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED )
#define KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED
// System includes
// External includes
// Project includes
#include "processes/process.h"
#include "includes/model_part.h"
#include "processes/simple_mortar_mapper_process.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NormalGapProcess
* @ingroup ContactStructuralMechanicsApplication
* @brief This process computes the normal gap
* @author Vicente Mataix Ferrandiz
* @tparam TDim The dimension of work
* @tparam TNumNodes The number of nodes of the slave
* @tparam TNumNodesMaster The number of nodes of the master
*/
template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster = TNumNodes>
class KRATOS_API(CONTACT_STRUCTURAL_MECHANICS_APPLICATION) NormalGapProcess
    : public Process
{
public:
    ///@name Type Definitions
    ///@{

    /// The type of mapper considered
    typedef SimpleMortarMapperProcess<TDim, TNumNodes, Variable<array_1d<double, 3>>, TNumNodesMaster> MapperType;

    /// General type definitions
    typedef ModelPart::NodesContainerType NodesArrayType;

    /// The definition of zero tolerance
    static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();

    /// Pointer definition of NormalGapProcess
    KRATOS_CLASS_POINTER_DEFINITION( NormalGapProcess );

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief The constructor of the normal gap process uses the following inputs:
     * @param rMasterModelPart The master model part to be considered
     * @param rSlaveModelPart The slave model part to be considered
     * @param SearchOrientation The orientation of the search (inverted or not)
     */
    NormalGapProcess(
        ModelPart& rMasterModelPart,
        ModelPart& rSlaveModelPart,
        const bool SearchOrientation = true
        ) : mrMasterModelPart(rMasterModelPart),
            mrSlaveModelPart(rSlaveModelPart),
            mSearchOrientation(SearchOrientation)
    {
    }

    /// Destructor
    virtual ~NormalGapProcess() = default;

    ///@}
    ///@name Operators
    ///@{

    /// Calling the process as a functor executes it
    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Execute method is used to execute the Process algorithms.
     */
    void Execute() override;

    ///@}
    ///@name Input and output
    ///@{

    /************************************ GET INFO *************************************/
    /***********************************************************************************/

    std::string Info() const override
    {
        return "NormalGapProcess";
    }

    /************************************ PRINT INFO ***********************************/
    /***********************************************************************************/

    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
protected:
    ///@name Protected member Variables
    ///@{

    ModelPart& mrMasterModelPart;  ///< The master model part to be considered
    ModelPart& mrSlaveModelPart;   ///< The slave model part to be considered
    const bool mSearchOrientation; ///< The orientation of the search (inverted or not)

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method switches the SLAVE/MASTER flags of an array of nodes
     * @param rNodes The set of nodes where the flags are flipped
     */
    static inline void SwitchFlagNodes(NodesArrayType& rNodes)
    {
        #pragma omp parallel for
        for(int i = 0; i < static_cast<int>(rNodes.size()); ++i) {
            auto it_node = rNodes.begin() + i;
            it_node->Flip(SLAVE);
            it_node->Flip(MASTER);
        }
    }

    /**
     * @brief This method computes the normal gap
     * @param rNodes The set of nodes where the gap is computed
     */
    void ComputeNormalGap(NodesArrayType& rNodes);

    ///@}
}; // Class NormalGapProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/****************************** INPUT STREAM FUNCTION ******************************/
/***********************************************************************************/
template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster>
inline std::istream& operator >> (std::istream& rIStream,
NormalGapProcess<TDim, TNumNodes, TNumNodesMaster>& rThis);
/***************************** OUTPUT STREAM FUNCTION ******************************/
/***********************************************************************************/
template<SizeType TDim, SizeType TNumNodes, SizeType TNumNodesMaster>
inline std::ostream& operator << (std::ostream& rOStream,
const NormalGapProcess<TDim, TNumNodes, TNumNodesMaster>& rThis)
{
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_NORMAL_GAP_PROCESS_H_INCLUDED defined
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/fourier.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  Compute the requested distortion metric first; give up on failure.
*/
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
/*
  The difference canvas covers the larger of the two image geometries.
*/
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
/*
  Annotation colors: differing pixels, matching pixels, and masked pixels.
  Each default can be overridden through an image artifact.
*/
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
  Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p,
*magick_restrict q;
Quantum
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
ssize_t
i;
/*
  Pixels excluded by either read mask are painted with the mask color.
*/
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
/*
  A pixel differs when any updatable channel's alpha-weighted squared
  distance reaches the fuzz threshold.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance,
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
distance=pixel*pixel;
if (distance >= fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
  Compose the annotation layer over the extended clone of the input image.
*/
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Absolute-error (AE) metric: counts, per channel, the pixels whose
    squared difference from the reconstruction exceeds the fuzz threshold.
    Counts are accumulated into distortion[0..MaxPixelChannels]; the
    CompositePixelChannel slot counts pixels differing in any channel.
    Returns MagickFalse if any pixel row could not be read.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);  /* squared threshold */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Fuzz metric: the square root of the channel-count-normalized mean of
    the squared, QuantumScale-normalized channel differences.  Per-channel
    means land in distortion[0..MaxPixelChannels-1]; the composite slot
    receives the RMS over all compared channels.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /* const-qualify the q cast for consistency with the other metrics */
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      local_area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    {
      area+=local_area;
      for (j=0; j <= MaxPixelChannels; j++)
        distortion[j]+=channel_distortion[j];
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);  /* avoids division by zero */
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Mean absolute error (MAE): the average of the QuantumScale-normalized
    absolute channel differences.  distortion[] accumulates sums which are
    divided by the compared area before returning; the composite slot is
    additionally divided by the channel count.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      local_area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    {
      area+=local_area;
      for (j=0; j <= MaxPixelChannels; j++)
        distortion[j]+=channel_distortion[j];
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);  /* avoids division by zero */
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Mean-error-per-pixel metric: accumulates the absolute channel error
    into distortion[] and records the mean, normalized mean, and
    normalized maximum error directly in image->error (hence the
    non-const image parameter).  Runs serially -- no OpenMP -- because
    every sample updates the shared accumulators.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* area counts channel samples, not pixels. */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);  /* avoids division by zero */
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Mean squared error (MSE): the average of the squared, QuantumScale-
    normalized channel differences.  distortion[] receives per-channel
    means; the composite slot holds the mean over all compared channels.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    double
      channel_distortion[MaxPixelChannels+1];

    size_t
      local_area = 0;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      local_area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    {
      area+=local_area;
      for (j=0; j <= MaxPixelChannels; j++)
        distortion[j]+=channel_distortion[j];
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);  /* avoids division by zero */
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  /* explicit cast for consistency with the other metrics */
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  /*
    NCC metric: per-channel normalized cross correlation, i.e. the
    area-normalized covariance of the two images divided by the product of
    their per-channel standard deviations.  The composite slot receives
    the root mean square of the per-channel correlations.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    channels,
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /*
    First pass: count the unmasked pixels that participate in the metric.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);  /* avoids division by zero */
  /*
    Second pass: accumulate the area-normalized covariance per channel.
  */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distortion[i]+=area*QuantumScale*((double) p[i]-
            image_statistics[channel].mean)*(GetPixelChannel(reconstruct_image,
            channel,q)-reconstruct_statistics[channel].mean);
        else
          distortion[i]+=area*QuantumScale*(Sa*p[i]-
            image_statistics[channel].mean)*(Da*GetPixelChannel(
            reconstruct_image,channel,q)-reconstruct_statistics[channel].mean);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  channels=0;
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    if (fabs(gamma) >= MagickEpsilon)
      {
        gamma=PerceptibleReciprocal(gamma);
        distortion[i]=QuantumRange*gamma*distortion[i];
        distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
        channels++;
      }
  }
  /*
    Guard against a zero divide when no channel pair has a non-zero
    standard deviation product (e.g. two constant images): leave the
    composite correlation at 0.0 instead of producing NaN.
  */
  if (channels != 0)
    distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
      channels);
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Peak absolute error (PAE): the maximum QuantumScale-normalized
    absolute channel difference seen anywhere in the compared area,
    recorded per channel; the composite slot holds the overall maximum.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    /* Merge the row maxima into the shared result. */
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)

  /*
    Clamped base-10 logarithm of |x|: magnitudes below Log10Epsilon are
    treated as Log10Epsilon so the result is always finite.
  */
  double
    magnitude;

  magnitude=fabs(x);
  if (magnitude < Log10Epsilon)
    magnitude=Log10Epsilon;
  return(log10(magnitude));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    PSNR metric: converts the per-channel mean squared error to decibels
    (10*log10(1/MSE) for normalized intensities).  A vanishing MSE maps
    to INFINITY, i.e. identical images.
  */
  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]);
  }
  return(status);
}
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Perceptual-hash metric: compares the image-moment hashes of the two
    images per channel, moment, and colorspace.  Returns MagickFalse only
    if either hash could not be computed.
  */
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /*
            NOTE(review): this assigns rather than accumulates, so when
            normalizing only the last moment/colorspace contributes --
            confirm this is the intended behavior.
          */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* Each thread owns a distinct channel slot, so no lock is needed here. */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    RMSE metric: the square root of the per-channel mean squared error.
  */
  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  i=0;
  while (i <= MaxPixelChannels)
  {
    distortion[i]=sqrt(distortion[i]);
    i++;
  }
  return(status);
}
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0

  /*
    SSIM metric: Gaussian-kernel-weighted structural similarity per
    channel; the mean SSIM over the compared area is returned in
    distortion[].  Fix: removed a duplicate memset of
    x_pixel_sigma_squared that mistakenly referenced
    sizeof(y_pixel_sigma_squared).
  */
  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    area,
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Stabilizer constants c1 and c2, overridable via artifacts. */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  area=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    size_t
      local_area = 0;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a window tall and wide enough to center the kernel on every
      pixel of this row.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      /* Skip pixels masked out of either image. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /*
        Accumulate kernel-weighted means, second moments, and the cross
        moment over the local window.
      */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /* Advance past the remainder of the padded row. */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        /* Standard SSIM formula with stabilizers c1 and c2. */
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
          (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      local_area++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    {
      area+=local_area;
      for (i=0; i <= MaxPixelChannels; i++)
        distortion[i]+=channel_distortion[i];
    }
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,j);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[j]/=area;
  }
  distortion[CompositePixelChannel]/=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    DSSIM metric: each SSIM channel score s is mapped to (1-s)/2 so that
    0.0 means identical images and larger values mean more dissimilar.
  */
  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  i=0;
  while (i <= MaxPixelChannels)
  {
    distortion[i]=(1.0-(distortion[i]))/2.0;
    i++;
  }
  return(status);
}
/*
  GetImageDistortion() compares image to reconstruct_image with the requested
  metric and stores the composite-channel distortion in *distortion.  Returns
  the status reported by the underlying metric routine.  Unknown metrics fall
  through to normalized cross correlation (the switch default).
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;  /* per-channel results plus the composite slot */
  MagickBooleanType
    status;
  size_t
    length;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;  /* one slot per channel + CompositePixelChannel */
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /*
    Dispatch on the metric; every case assigns status, and the default case
    guarantees status is always initialized.
  */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  /*
    Only the composite channel is reported; it is also recorded as the
    "distortion" image property.
  */
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() compares image to reconstruct_image with the requested
  metric and returns a newly allocated array with one distortion value per
  pixel channel (plus the composite slot).  Returns NULL on failure; the
  caller owns the returned memory and must relinquish it.
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;  /* per-channel results plus the composite slot */
  MagickBooleanType
    status;
  size_t
    length;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Fixed: previously dispatched to GetRootMeanSquaredDistortion(), so a
        PerceptualHashErrorMetric request silently computed RMSE (the sibling
        GetImageDistortion() dispatches this metric correctly).
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      /*
        On failure release the buffer and signal the caller with NULL.
      */
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() walks both images in lock step and returns MagickTrue only
  when every updatable channel of every pixel matches to within MagickEpsilon.
  The nested loops communicate a mismatch by breaking outward (channel loop ->
  pixel loop -> row loop); the final row index tells us whether the scan
  completed.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  size_t
    columns,
    rows;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Scan the union of the two geometries; virtual views service out-of-bounds
    requests on the smaller image.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /*
          Skip channels that are undefined or not updatable in both images.
        */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;  /* mismatch: unwind through the outer loops */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;  /* channel loop broke early => pixels differ */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;  /* pixel loop broke early => stop scanning rows */
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Equal only if the row loop ran to completion.
  */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric() accumulates per-channel differences between image and
  reconstruct_image and stores three error statistics in image->error
  (mean_error_per_pixel, normalized_mean_error, normalized_maximum_error).
  Returns MagickTrue when the mean error per pixel is exactly zero, i.e. the
  images match.
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;
  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;
  MagickBooleanType
    status;
  size_t
    columns,
    rows;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Scan the union of the two geometries via virtual views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;
    ssize_t
      x;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        /*
          Only differences at or above MagickEpsilon contribute to the error
          sums; area counts every compared channel sample regardless.
        */
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    NOTE(review): if no channel is comparable, area stays 0.0 and these
    divisions yield NaN/Inf — confirm callers guarantee at least one
    updatable channel.
  */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
/*
  CrossCorrelationImage() computes the cross correlation of alpha_image with
  beta_image in the frequency domain: FFT(alpha) * conj(FFT(beta)), followed
  by an inverse FFT.  Returns the correlation image, or NULL on failure.
*/
static Image *CrossCorrelationImage(const Image *alpha_image,
  const Image *beta_image,ExceptionInfo *exception)
{
  Image
    *clone_image,
    *complex_conjugate,
    *complex_multiplication,
    *cross_correlation,
    *fft_images;
  /*
    Take the FFT of beta image.
  */
  clone_image=CloneImage(beta_image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(clone_image);
  (void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
  fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,
    exception);
  clone_image=DestroyImageList(clone_image);
  if (fft_images == (Image *) NULL)
    return(fft_images);
  /*
    Take the complex conjugate of beta image.
  */
  complex_conjugate=ComplexImages(fft_images,ConjugateComplexOperator,
    exception);
  fft_images=DestroyImageList(fft_images);
  if (complex_conjugate == (Image *) NULL)
    return(complex_conjugate);
  /*
    Take the FFT of the alpha image.
  */
  clone_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      complex_conjugate=DestroyImageList(complex_conjugate);
      return(clone_image);
    }
  (void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
  fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,exception);
  clone_image=DestroyImageList(clone_image);
  if (fft_images == (Image *) NULL)
    {
      complex_conjugate=DestroyImageList(complex_conjugate);
      return(fft_images);
    }
  /*
    Chain the alpha FFT pair behind the conjugate pair so ComplexImages() can
    multiply them; destroying complex_conjugate below also destroys
    fft_images.
  */
  complex_conjugate->next->next=fft_images;
  /*
    Do complex multiplication.
  */
  (void) SetImageArtifact(complex_conjugate,"compose:clamp","false");
  complex_multiplication=ComplexImages(complex_conjugate,
    MultiplyComplexOperator,exception);
  complex_conjugate=DestroyImageList(complex_conjugate);
  /*
    Fixed: previously this tested fft_images, which (a) dangles — it was just
    destroyed as part of the complex_conjugate list — and (b) fails to catch
    a NULL complex_multiplication, which was then dereferenced below.
  */
  if (complex_multiplication == (Image *) NULL)
    return(complex_multiplication);
  /*
    Do the IFT and return the cross-correlation result.
  */
  cross_correlation=InverseFourierTransformImage(complex_multiplication,
    complex_multiplication->next,MagickFalse,exception);
  complex_multiplication=DestroyImageList(complex_multiplication);
  return(cross_correlation);
}
/*
  NCCDivideImage() returns a clone of alpha_image with each updatable channel
  divided (pixel-wise) by the corresponding channel of beta_image.  Returns
  NULL on failure.
*/
static Image *NCCDivideImage(const Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  CacheView
    *alpha_view,
    *beta_view;
  Image
    *divide_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Divide one image into another.
  */
  divide_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (divide_image == (Image *) NULL)
    return(divide_image);
  status=MagickTrue;
  alpha_view=AcquireAuthenticCacheView(divide_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,divide_image,divide_image->rows,1)
#endif
  for (y=0; y < (ssize_t) divide_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(alpha_view,0,y,divide_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) divide_image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(divide_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(divide_image,i);
        PixelTrait traits = GetPixelChannelTraits(divide_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Skip near-zero divisors; PerceptibleReciprocal() guards the rest.
        */
        if (fabs(p[i]) >= MagickEpsilon)
          q[i]*=PerceptibleReciprocal(QuantumScale*p[i]);
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(divide_image);
    }
    if (SyncCacheViewAuthenticPixels(alpha_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  alpha_view=DestroyCacheView(alpha_view);
  if (status == MagickFalse)
    divide_image=DestroyImage(divide_image);
  return(divide_image);
}
/*
  NCCMaximaImage() finds the pixel whose mean over the updatable channels is
  largest, storing the mean in *maxima and its location in *offset.
*/
static MagickBooleanType NCCMaximaImage(const Image *image,double *maxima,
  RectangleInfo *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Identify the maxima value in the image and its location.
  */
  status=MagickTrue;
  *maxima=0.0;
  offset->x=0;
  offset->y=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        sum = 0.0;
      ssize_t
        channels = 0,
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        sum+=p[i];
        channels++;
      }
      /*
        Track the running maximum of the per-pixel channel mean.
      */
      if ((channels != 0) && ((sum/channels) > *maxima))
        {
          *maxima=sum/channels;
          offset->x=x;
          offset->y=y;
        }
      p+=GetPixelChannels(image);
    }
    /*
      NOTE(review): this view is virtual and read-only; the Sync call looks
      superfluous — confirm against upstream before removing.
    */
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  NCCMultiplyImage() scales every updatable channel of image in place by
  factor; when channel_statistics is non-NULL each channel is additionally
  scaled by QuantumScale times that channel's standard deviation.
*/
static MagickBooleanType NCCMultiplyImage(Image *image,const double factor,
  const ChannelStatistics *channel_statistics,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Multiply each pixel by a factor.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics != (const ChannelStatistics *) NULL)
          q[i]*=QuantumScale*channel_statistics[channel].standard_deviation;
        q[i]*=factor;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
static Image *NCCSquareImage(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*square_image;
MagickBooleanType
status;
ssize_t
y;
/*
Square each pixel in the image.
*/
square_image=CloneImage(image,0,0,MagickTrue,exception);
if (square_image == (Image *) NULL)
return(square_image);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(square_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(square_image,square_image,square_image->rows,1)
#endif
for (y=0; y < (ssize_t) square_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,square_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) square_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(square_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(square_image,i);
PixelTrait traits = GetPixelChannelTraits(square_image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]*=QuantumScale*q[i];
}
q+=GetPixelChannels(square_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
square_image=DestroyImage(square_image);
return(square_image);
}
/*
  NCCSubtractImageMean() clones beta_image padded to the geometry of
  alpha_image; inside the beta geometry each updatable channel has its mean
  (from channel_statistics) subtracted, and the padding region is zeroed.
  Returns NULL on failure.
*/
static Image *NCCSubtractImageMean(const Image *alpha_image,
  const Image *beta_image,const ChannelStatistics *channel_statistics,
  ExceptionInfo *exception)
{
  CacheView
    *beta_view,
    *image_view;
  Image
    *gamma_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Subtract the image mean and pad.
  */
  gamma_image=CloneImage(beta_image,alpha_image->columns,alpha_image->rows,
    MagickTrue,exception);
  if (gamma_image == (Image *) NULL)
    return(gamma_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(gamma_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,gamma_image,gamma_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gamma_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,gamma_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gamma_image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(gamma_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(gamma_image,i);
        PixelTrait traits = GetPixelChannelTraits(gamma_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Outside the beta geometry the pad is zero; inside, subtract the
          channel mean.
        */
        if ((x >= (ssize_t) beta_image->columns) ||
            (y >= (ssize_t) beta_image->rows))
          q[i]=(Quantum) 0;
        else
          q[i]=p[i]-channel_statistics[channel].mean;
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(gamma_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    gamma_image=DestroyImage(gamma_image);
  return(gamma_image);
}
/*
  NCCUnityImage() returns an alpha_image-sized image whose updatable channels
  are QuantumRange inside the beta_image geometry and zero in the padding
  region.  Returns NULL on failure.
*/
static Image *NCCUnityImage(const Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  Image
    *unity_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Create a padded unity image.
  */
  unity_image=CloneImage(alpha_image,alpha_image->columns,alpha_image->rows,
    MagickTrue,exception);
  if (unity_image == (Image *) NULL)
    return(unity_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(unity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(unity_image,unity_image,unity_image->rows,1)
#endif
  for (y=0; y < (ssize_t) unity_image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,unity_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) unity_image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(unity_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(unity_image,i);
        PixelTrait traits = GetPixelChannelTraits(unity_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=QuantumRange;
        /*
          Zero the padding outside the beta geometry.
        */
        if ((x >= (ssize_t) beta_image->columns) ||
            (y >= (ssize_t) beta_image->rows))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(unity_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unity_image=DestroyImage(unity_image);
  return(unity_image);
}
/*
  NCCVarianceImage() returns a clone of alpha_image where each updatable
  channel becomes a normalized square root of the absolute difference with
  the corresponding beta_image channel.  Returns NULL on failure.
*/
static Image *NCCVarianceImage(Image *alpha_image,const Image *beta_image,
  ExceptionInfo *exception)
{
  CacheView
    *beta_view,
    *image_view;
  Image
    *variance_image;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Compute the variance of the two images.
  */
  variance_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
  if (variance_image == (Image *) NULL)
    return(variance_image);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(variance_image,exception);
  beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(beta_image,variance_image,variance_image->rows,1)
#endif
  for (y=0; y < (ssize_t) variance_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,variance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) variance_image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(variance_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(variance_image,i);
        PixelTrait traits = GetPixelChannelTraits(variance_image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          NOTE(review): the division by sqrt(QuantumRange) is applied to the
          already-clamped quantum, outside ClampToQuantum() — confirm this
          parenthesization matches the intended normalization.
        */
        q[i]=ClampToQuantum((QuantumRange*sqrt(fabs((double) QuantumScale*
          (q[i]-p[i])))))/sqrt((double) QuantumRange);
      }
      p+=GetPixelChannels(beta_image);
      q+=GetPixelChannels(variance_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  beta_view=DestroyCacheView(beta_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    variance_image=DestroyImage(variance_image);
  return(variance_image);
}
/*
  NCCSimilarityImage() implements FFT-accelerated normalized cross correlation
  (contributed by Fred Weinhaus): it builds local-statistics images via
  cross correlations, normalizes them, and returns a correlation surface whose
  maximum identifies the best-match offset of reference within image.  Stores
  the best offset in *offset and 1-NCC in *similarity_metric; returns NULL on
  failure.
*/
static Image *NCCSimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define DestroySimilarityResources() \
{ \
  if (channel_statistics != (ChannelStatistics *) NULL) \
    channel_statistics=(ChannelStatistics *) \
      RelinquishMagickMemory(channel_statistics); \
  if (beta_image != (Image *) NULL) \
    beta_image=DestroyImage(beta_image); \
  if (gamma_image != (Image *) NULL) \
    gamma_image=DestroyImage(gamma_image); \
  if (ncc_image != (Image *) NULL) \
    ncc_image=DestroyImage(ncc_image); \
  if (normalize_image != (Image *) NULL) \
    normalize_image=DestroyImage(normalize_image); \
  if (square_image != (Image *) NULL) \
    square_image=DestroyImage(square_image); \
  if (unity_image != (Image *) NULL) \
    unity_image=DestroyImage(unity_image); \
}
#define ThrowSimilarityException() \
{ \
  DestroySimilarityResources() \
  return((Image *) NULL); \
}

  ChannelStatistics
    *channel_statistics = (ChannelStatistics *) NULL;
  double
    maxima = 0.0;
  Image
    *beta_image = (Image *) NULL,
    *correlation_image = (Image *) NULL,
    *gamma_image = (Image *) NULL,
    *ncc_image = (Image *) NULL,
    *normalize_image = (Image *) NULL,
    *square_image = (Image *) NULL,
    *unity_image = (Image *) NULL;
  MagickBooleanType
    status;
  RectangleInfo
    geometry;
  /*
    Accelerated correlation-based image similarity using FFT local statistics.
    Contributed by Fred Weinhaus.
  */
  square_image=NCCSquareImage(image,exception);
  if (square_image == (Image *) NULL)
    ThrowSimilarityException();
  unity_image=NCCUnityImage(image,reference,exception);
  if (unity_image == (Image *) NULL)
    ThrowSimilarityException();
  /*
    Compute the cross correlation of the square and unity images.
  */
  ncc_image=CrossCorrelationImage(square_image,unity_image,exception);
  square_image=DestroyImage(square_image);
  if (ncc_image == (Image *) NULL)
    ThrowSimilarityException();
  status=NCCMultiplyImage(ncc_image,(double) QuantumRange*reference->columns*
    reference->rows,(const ChannelStatistics *) NULL,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  /*
    Compute the cross correlation of the source and unity images.
  */
  gamma_image=CrossCorrelationImage(image,unity_image,exception);
  unity_image=DestroyImage(unity_image);
  if (gamma_image == (Image *) NULL)
    ThrowSimilarityException();
  square_image=NCCSquareImage(gamma_image,exception);
  gamma_image=DestroyImage(gamma_image);
  /*
    Fixed: NCCSquareImage() can fail (returning NULL); previously the result
    was passed unchecked to NCCMultiplyImage().
  */
  if (square_image == (Image *) NULL)
    ThrowSimilarityException();
  status=NCCMultiplyImage(square_image,(double) QuantumRange,
    (const ChannelStatistics *) NULL,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  /*
    Compute the variance of the two images.
  */
  gamma_image=NCCVarianceImage(ncc_image,square_image,exception);
  square_image=DestroyImage(square_image);
  ncc_image=DestroyImage(ncc_image);
  if (gamma_image == (Image *) NULL)
    ThrowSimilarityException();
  channel_statistics=GetImageStatistics(reference,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    ThrowSimilarityException();
  /*
    Subtract the image mean.
  */
  status=NCCMultiplyImage(gamma_image,1.0,channel_statistics,exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  normalize_image=NCCSubtractImageMean(image,reference,channel_statistics,
    exception);
  if (normalize_image == (Image *) NULL)
    ThrowSimilarityException();
  ncc_image=CrossCorrelationImage(image,normalize_image,exception);
  normalize_image=DestroyImage(normalize_image);
  if (ncc_image == (Image *) NULL)
    ThrowSimilarityException();
  /*
    Divide the two images.
  */
  beta_image=NCCDivideImage(ncc_image,gamma_image,exception);
  ncc_image=DestroyImage(ncc_image);
  gamma_image=DestroyImage(gamma_image);
  if (beta_image == (Image *) NULL)
    ThrowSimilarityException();
  (void) ResetImagePage(beta_image,"0x0+0+0");
  SetGeometry(image,&geometry);
  geometry.width=image->columns-reference->columns;
  geometry.height=image->rows-reference->rows;
  /*
    Crop padding.
  */
  correlation_image=CropImage(beta_image,&geometry,exception);
  beta_image=DestroyImage(beta_image);
  if (correlation_image == (Image *) NULL)
    ThrowSimilarityException();
  (void) ResetImagePage(correlation_image,"0x0+0+0");
  /*
    Identify the maxima value in the image and its location.
  */
  status=GrayscaleImage(correlation_image,AveragePixelIntensityMethod,
    exception);
  if (status == MagickFalse)
    ThrowSimilarityException();
  status=NCCMaximaImage(correlation_image,&maxima,offset,exception);
  if (status == MagickFalse)
    {
      correlation_image=DestroyImage(correlation_image);
      ThrowSimilarityException();
    }
  *similarity_metric=1.0-QuantumScale*maxima;
  DestroySimilarityResources();
  return(correlation_image);
}
#endif
/*
  Score one candidate placement: crop the region of `image` at
  (x_offset,y_offset) with the same extent as `reference`, then measure the
  distortion between the crop and `reference` using the requested metric.
  Returns 0.0 when the crop or the distortion computation fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion = 0.0;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Window of image under test: reference-sized, anchored at the offset.
  */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop_image=CropImage(image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  status=GetImageDistortion(crop_image,reference,metric,&distortion,
    exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
/*
  SimilarityImage() slides the reference image over every candidate offset
  inside image, scores each placement with the requested metric, and returns
  a "similarity map" image with one pixel per candidate offset.  The best
  (lowest) score and its location are returned through similarity_metric and
  offset.  Scanning stops early once a score at or below
  similarity_threshold is found.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  /* Best score so far; lower is better.  Start at the maximum. */
  *similarity_metric=MagickMaximumValue;
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
  {
    /*
      FFT-accelerated path for the NCC metric: taken unless the
      "compare:accelerate-ncc" artifact is explicitly set to false.
    */
    const char *artifact = GetImageArtifact(image,"compare:accelerate-ncc");
    MagickBooleanType accelerate = (artifact != (const char *) NULL) &&
      (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
    if ((accelerate != MagickFalse) &&
        (metric == NormalizedCrossCorrelationErrorMetric))
      {
        similarity_image=NCCSimilarityImage(image,reference,metric,
          similarity_threshold,offset,similarity_metric,exception);
        return(similarity_image);
      }
  }
#endif
  /*
    The map has (columns-ref.columns+1) x (rows-ref.rows+1) pixels: one per
    valid placement of reference inside image.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Re-read the best score in case another thread improved it. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      /* Early exit: a good-enough match was already found. */
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* NCC (and the undefined default) report correlation, not distance. */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /* Serialized update of the global best score and its location. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /* Encode the score into every updatable channel of the map pixel. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
ep.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - EP
This benchmark is an OpenMP C version of the NPB EP code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: P. O. Frederickson
D. H. Bailey
A. C. Woo
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
#include "npbparams.h"
/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE
/* global variables */
/* common /storage/ */
static double x[2*NK];
#pragma omp threadprivate(x)
static double q[NQ];
/*--------------------------------------------------------------------
program EMBAR
c-------------------------------------------------------------------*/
/*
c This is the serial version of the APP Benchmark 1,
c the "embarassingly parallel" benchmark.
c
c M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c numbers. MK is the Log_2 of the size of each batch of uniform random
c numbers. MK can be set for convenience on a given system, since it does
c not affect the results.
*/
/*
 * EP ("embarrassingly parallel") benchmark driver.
 *
 * Generates NN batches of 2*NK uniform pseudorandom numbers, converts the
 * pairs to Gaussian deviates by acceptance-rejection, tallies them into NQ
 * concentric square annuli, and verifies the sums sx/sy against reference
 * values for the known problem classes (M = 24/25/28/30/32).
 *
 * Fixes vs. the stock NPB source:
 *  - the size[] scan previously started at index 13, one byte past the
 *    NUL that sprintf("%12.0f", ...) wrote at index 12 (uninitialized read);
 *  - unused locals removed; explicit return 0 added.
 */
int main(int argc, char **argv) {

    double  Mops, t1, t2, sx, sy, tm, an, tt, gc;
    double  dum[3] = { 1.0, 1.0, 1.0 };
    int     np, i, k, nit, k_offset, j;
    int     nthreads = 1;
    boolean verified;
    char    size[13+1];		/* character*13 */

/*
 *  Because the size of the problem is too large to store in a 32-bit
 *  integer for some classes, we put it into a string (for printing).
 *  Have to strip off the decimal point put in there by the floating
 *  point print statement (internal file)
 */

#ifndef POSIX
#ifndef NOBOMP
    bomp_custom_init();
#endif
#endif
    omp_set_num_threads(1);

    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	   " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
    /* sprintf wrote indices 0..12 (12 chars + NUL): scan only those. */
    for (j = 12; j >= 1; j--) {
	if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);
    verified = FALSE;

/*
 *  Compute the number of "batches" of random number pairs generated
 *  per processor. Adjust if the number of processors does not evenly
 *  divide the total number
 */
    np = NN;

/*
 *  Call the random number generator functions and initialize
 *  the x-array to reduce the effects of paging on the timings.
 *  Also, call all mathematical functions that are used. Make
 *  sure these initializations cannot be eliminated as dead code.
 */
    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);
    for (i = 0; i < 2*NK; i++) {
	x[i] = -1.0e99;
    }
    printf("Reached here ");	/* NOTE(review): debug leftover; kept to preserve output */
    Mops = log(sqrt(fabs(max(1.0, 1.0))));

    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);

    vranlc(0, &t1, A, x);

/*  Compute AN = A ^ (2 * NK) (mod 2^46). */
    t1 = A;
    for (i = 1; i <= MK+1; i++) {
	t2 = randlc(&t1, t1);	/* squares the seed in place; t2 is discarded */
    }
    an = t1;
    tt = S;			/* kept from original source; currently unread */
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;
    for (i = 0; i <= NQ - 1; i++) {
	q[i] = 0.0;
    }

/*
 *  Each instance of this loop may be performed independently. We compute
 *  the k offsets separately to take into account the fact that some nodes
 *  have more numbers to generate than others
 */
    k_offset = -1;

#pragma omp parallel copyin(x)
{
    double t1, t2, t3, t4, x1, x2;
    int kk, i, ik, l;
    double qq[NQ];		/* private copy of q[0:NQ-1] */

    for (i = 0; i < NQ; i++) qq[i] = 0.0;

#pragma omp for reduction(+:sx,sy) schedule(static)
    for (k = 1; k <= np; k++) {
	kk = k_offset + k;
	t1 = S;
	t2 = an;

/*      Find starting seed t1 for this kk by binary decomposition of kk. */
	for (i = 1; i <= 100; i++) {
	    ik = kk / 2;
	    if (2 * ik != kk) t3 = randlc(&t1, t2);
	    if (ik == 0) break;
	    t3 = randlc(&t2, t2);
	    kk = ik;
	}

/*      Compute uniform pseudorandom numbers (x-1: vranlc is 1-indexed). */
	if (TIMERS_ENABLED == TRUE) timer_start(3);
	vranlc(2*NK, &t1, A, x-1);
	if (TIMERS_ENABLED == TRUE) timer_stop(3);

/*
 *      Compute Gaussian deviates by acceptance-rejection method and
 *      tally counts in concentric square annuli. This loop is not
 *      vectorizable.
 */
	if (TIMERS_ENABLED == TRUE) timer_start(2);
	for (i = 0; i < NK; i++) {
	    x1 = 2.0 * x[2*i] - 1.0;
	    x2 = 2.0 * x[2*i+1] - 1.0;
	    t1 = pow2(x1) + pow2(x2);
	    if (t1 <= 1.0) {		/* inside the unit disc: accept */
		t2 = sqrt(-2.0 * log(t1) / t1);
		t3 = (x1 * t2);		/* Xi */
		t4 = (x2 * t2);		/* Yi */
		l = max(fabs(t3), fabs(t4));
		qq[l] += 1.0;		/* counts */
		sx = sx + t3;		/* sum of Xi */
		sy = sy + t4;		/* sum of Yi */
	    }
	}
	if (TIMERS_ENABLED == TRUE) timer_stop(2);
    }

/*  Merge the thread-private tallies into the global counts. */
#pragma omp critical
    {
	for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
    }
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end of parallel region */

    for (i = 0; i <= NQ-1; i++) {
	gc = gc + q[i];
    }

    timer_stop(1);
    tm = timer_read(1);

/*  Verify sx/sy against the reference sums for the known classes. */
    nit = 0;
    if (M == 24) {
	if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
	   (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 25) {
	if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
	    (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 28) {
	if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
	    (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 30) {
	if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
	    (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 32) {
	if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
	    (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    }

    Mops = pow(2.0, M+1)/tm/1000000.0;

    printf("EP Benchmark Results: \n"
	   "CPU Time = %10.4f\n"
	   "N = 2^%5d\n"
	   "No. Gaussian Pairs = %15.0f\n"
	   "Sums = %25.15e %25.15e\n"
	   "Counts:\n",
	   tm, M, gc, sx, sy);
    for (i = 0; i <= NQ-1; i++) {
	printf("%3d %15.0f\n", i, q[i]);
    }

    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
		    tm, Mops,
		    "Random numbers generated",
		    verified, NPBVERSION, COMPILETIME,
		    CS1, CS2, CS3, CS4, CS5, CS6, CS7);

    if (TIMERS_ENABLED == TRUE) {
	printf("Total time:     %f", timer_read(1));
	printf("Gaussian pairs: %f", timer_read(2));
	printf("Random numbers: %f", timer_read(3));
    }

    return 0;
}
|
mgl.h | /***************************************************************************
* mgl.h is part of Math Graphic Library
* Copyright (C) 2007-2016 Alexey Balakin <mathgl.abalakin@gmail.ru> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifndef _MGL_H_
#define _MGL_H_
#include "mgl2/mgl_cf.h"
#ifdef __cplusplus
#include "mgl2/data.h"
#include "mgl2/datac.h"
#include <sys/stat.h>
//-----------------------------------------------------------------------------
/// Wrapper class for all graphics
class MGL_EXPORT mglGraph
{
	mglGraph(const mglGraph &) {}	// copying is not allowed
	const mglGraph &operator=(const mglGraph &t)	{ return t; }
protected:
	HMGL gr;	///< Handle of the wrapped C-level graphics object
public:
	HMPR pr;	///< Pointer to associated MGL parser
	/// Create a graph: kind=0 -- buffer-based (default), kind=1 -- OpenGL (if enabled), kind=-1 -- none (gr=NULL)
	mglGraph(int kind=0, int width=600, int height=400)
	{ pr = NULL;
	if(kind==-1)	gr=NULL;
#if MGL_HAVE_OPENGL
	else if(kind==1)	gr=mgl_create_graph_gl();
#else
	else if(kind==1)	// OpenGL requested but not compiled in: warn and fall back
	{ gr=mgl_create_graph(width, height);
	SetGlobalWarn("OpenGL support was disabled. Please, enable it and rebuild MathGL."); }
#endif
	else	gr=mgl_create_graph(width, height);
	}
	/// Wrap an existing HMGL handle (increases its use counter via mgl_use_graph)
	mglGraph(HMGL graph)
	{ pr = NULL; gr = graph; mgl_use_graph(gr,1); }
	/// Decrease the use counter and delete the handle once no user remains
	virtual ~mglGraph()
	{ if(mgl_use_graph(gr,-1)<1) mgl_delete_graph(gr); }
	/// Get pointer to internal HMGL object
	inline HMGL Self() { return gr; }
	/// Set default parameters for plotting
	inline void DefaultPlotParam() { mgl_set_def_param(gr); }
	/// Set name of plot for saving filename
	inline void SetPlotId(const char *id) { mgl_set_plotid(gr,id); }
	/// Get name of plot for saving filename
	inline const char *GetPlotId() { return mgl_get_plotid(gr); }
	/// Ask to stop drawing
	inline void Stop(bool stop=true) { mgl_ask_stop(gr, stop); }
	/// Check if plot termination is asked
	inline bool NeedStop() { return mgl_need_stop(gr); }
	/// Set callback function for event processing
	inline void SetEventFunc(void (*func)(void *), void *par=NULL)
	{ mgl_set_event_func(gr, func, par); }
	/// Set the transparency on/off.
	inline void Alpha(bool enable) { mgl_set_alpha(gr, enable); }
	/// Set the gray-scale mode on/off.
	inline void Gray(bool enable) { mgl_set_gray(gr, enable); }
	/// Set default value of alpha-channel
	inline void SetAlphaDef(double alpha) { mgl_set_alpha_default(gr, alpha); }
	/// Set the transparency type (0 - usual, 1 - glass, 2 - lamp)
	inline void SetTranspType(int type) { mgl_set_transp_type(gr, type); }
	/// Set the size of semi-transparent area around lines, marks, glyphs, ... Default is 1.
	inline void SetPenDelta(double d) { mgl_pen_delta(gr,d); }
	/// Set the using of light on/off.
	inline void Light(bool enable) { mgl_set_light(gr, enable); }
	/// Switch on/off the specified light source.
	inline void Light(int n,bool enable) { mgl_set_light_n(gr, n, enable); }
	/// Use diffusive light (only for local light sources) -- OBSOLETE
	inline void SetDifLight(bool dif) { mgl_set_light_dif(gr, dif); }
	/// Set to attach light settings to inplot.
	inline void AttachLight(bool enable) { mgl_set_attach_light(gr, enable); }
	/// Add a light source.
	inline void AddLight(int n, mglPoint p, char col='w', double bright=0.5, double ap=0)
	{ mgl_add_light_ext(gr, n, p.x, p.y, p.z, col, bright, ap); }
	/// Add a local light source with position r and direction p (see mgl_add_light_loc).
	inline void AddLight(int n, mglPoint r, mglPoint p, char col='w', double bright=0.5, double ap=0)
	{ mgl_add_light_loc(gr, n, r.x, r.y, r.z, p.x, p.y, p.z, col, bright, ap); }
	/// Set ambient light brightness
	inline void SetAmbient(double i) { mgl_set_ambbr(gr, i); }
	/// Set diffusive light brightness
	inline void SetDiffuse(double i) { mgl_set_difbr(gr, i); }
	/// Set the fog distance or switch it off (if d=0).
	inline void Fog(double d, double dz=0.25) { mgl_set_fog(gr, d, dz); }
	/// Set relative width of rectangles in Bars, Barh, BoxPlot, Candle, OHLC (default is 0.7)
	inline void SetBarWidth(double width) { mgl_set_bar_width(gr, width); }
	/// Set default size of marks (locally you can use "size" option)
	inline void SetMarkSize(double size) { mgl_set_mark_size(gr, size); }
	/// Set default size of arrows (locally you can use "size" option)
	inline void SetArrowSize(double size) { mgl_set_arrow_size(gr, size); }
	/// Set number of mesh lines (use 0 to draw all of them)
	inline void SetMeshNum(int num) { mgl_set_meshnum(gr, num); }
	/// Set number of visible faces (use 0 to draw all of them)
	inline void SetFaceNum(int num) { mgl_set_facenum(gr, num); }
	/// Set cutting for points outside of bounding box
	inline void SetCut(bool cut) { mgl_set_cut(gr, cut); }
	/// Set additional cutting box
	inline void SetCutBox(mglPoint p1, mglPoint p2)
	{ mgl_set_cut_box(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z); }
	/// Set the cutting off condition (formula)
	inline void CutOff(const char *EqC) { mgl_set_cutoff(gr, EqC); }
	/// Set default font size
	inline void SetFontSize(double size) { mgl_set_font_size(gr, size); }
	/// Set default font style and color
	inline void SetFontDef(const char *fnt) { mgl_set_font_def(gr, fnt); }
	/// Set FontSize by size in pt and picture DPI (default is 16 pt for dpi=72)
	virtual void SetFontSizePT(double pt, int dpi=72) { SetFontSize(pt*27.f/dpi); }
	/// Set FontSize by size in centimeters and picture DPI (default is 0.56 cm = 16 pt)
	inline void SetFontSizeCM(double cm, int dpi=72) { SetFontSizePT(cm*28.45f,dpi); }
	/// Set FontSize by size in inch and picture DPI (default is 0.22 in = 16 pt)
	inline void SetFontSizeIN(double in, int dpi=72) { SetFontSizePT(in*72.27f,dpi); }
	/// Load font from file
	inline void LoadFont(const char *name, const char *path=NULL)
	{ mgl_load_font(gr, name, path); }
	/// Copy font from another mglGraph instance
	inline void CopyFont(const mglGraph *GR) { mgl_copy_font(gr, GR->gr);}
	/// Restore font (load default font for new HMGL objects)
	inline void RestoreFont() { mgl_restore_font(gr); }
	/// Set to use or not text rotation
	inline void SetRotatedText(bool enable) { mgl_set_rotated_text(gr, enable); }
	/// Set to scale text in relative subplots too
	inline void SetScaleText(bool enable) { mgl_set_scale_text(gr, enable); }
	/// Set default font for all new HMGL and mglGraph objects
	static inline void SetDefFont(const char *name, const char *path=NULL) { mgl_def_font(name,path); }
	/// Add user-defined glyph for symbol and set its optional id
	inline void DefineSymbol(char id, const mglDataA &x, const mglDataA &y)
	{ mgl_define_symbol(gr, id, &x, &y); }
	/// Set default palette
	inline void SetPalette(const char *colors) { mgl_set_palette(gr, colors); }
	/// Set default color scheme
	inline void SetDefScheme(const char *sch) { mgl_set_def_sch(gr, sch); }
	/// Sets RGB values for color with given id
	static inline void SetColor(char id, double r, double g, double b) { mgl_set_color(id, r, g, b); }
	/// Set mask for face coloring as array of type 'unsigned char[8]'
	static inline void SetMask(char id, const char *mask) { mgl_set_mask(id, mask); }
	/// Set mask for face coloring as uint64_t number
	static inline void SetMask(char id, uint64_t mask) { mgl_set_mask_val(id, mask); }
	/// Set default mask rotation angle
	inline void SetMaskAngle(int angle) { mgl_set_mask_angle(gr, angle); }
	/// Get last warning code
	inline int GetWarn() { return mgl_get_warn(gr);}
	/// Set warning code and fill message
	inline void SetWarn(int code, const char *info) { mgl_set_warn(gr,code,info); }
	/// Get text of warning message(s)
	inline const char *Message() { return mgl_get_mess(gr); }
	/// Set global warning message
	static inline void SetGlobalWarn(const char *text) { mgl_set_global_warn(text); }
	/// Get text of global warning message(s)
	static inline const char *GlobalWarn() { return mgl_get_global_warn(); }
	/// Suppress printing warnings to stderr
	static inline void SuppressWarn(bool on) { mgl_suppress_warn(on); }
	/// Check MathGL version: returns false if `ver' is valid, true if it is not
	static inline bool CheckVersion(const char *ver) { return mgl_check_version(ver); }
	/// Display progress of something.
	inline void Progress(int value, int maximal) { mgl_progress(value, maximal, gr); }
	/// Set axis range scaling -- simplified way to shift/zoom axis range -- need to replot whole image!
	inline void ZoomAxis(mglPoint p1=mglPoint(0,0,0,0), mglPoint p2=mglPoint(1,1,1,1))
	{ mgl_zoom_axis(gr, p1.x,p1.y,p1.z,p1.c, p2.x,p2.y,p2.z,p2.c); }
	/// Add [v1, v2] to the current range in direction dir
	inline void AddRange(char dir, double v1, double v2)
	{ mgl_add_range_val(gr, dir, v1, v2); }
	/// Set range in direction dir as [v1, v2]
	inline void SetRange(char dir, double v1, double v2)
	{ mgl_set_range_val(gr, dir, v1, v2); }
	/// Set range in direction dir as minimal and maximal values of data a
	inline void SetRange(char dir, const mglDataA &dat, bool add=false)
	{ mgl_set_range_dat(gr, dir, &dat, add); }
	/// Set values of axis range as minimal and maximal values of corresponding data
	inline void SetRanges(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz, const mglDataA &cc)
	{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0);
	mgl_set_range_dat(gr,'z',&zz,0); mgl_set_range_dat(gr,'c',&cc,0); }
	/// Set values of axis range as minimal and maximal values of corresponding data (c-range taken from z-data)
	inline void SetRanges(const mglDataA &xx, const mglDataA &yy, const mglDataA &zz)
	{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0);
	mgl_set_range_dat(gr,'z',&zz,0); mgl_set_range_dat(gr,'c',&zz,0); }
	/// Set values of axis range as minimal and maximal values of corresponding data
	inline void SetRanges(const mglDataA &xx, const mglDataA &yy)
	{ mgl_set_range_dat(gr,'x',&xx,0); mgl_set_range_dat(gr,'y',&yy,0); }
	/// Set values of axis ranges
	inline void SetRanges(double x1, double x2, double y1, double y2, double z1=0, double z2=0)
	{ mgl_set_ranges(gr, x1, x2, y1, y2, z1, z2); }
	/// Set values of axis ranges
	inline void SetRanges(mglPoint p1, mglPoint p2)
	{ mgl_set_ranges(gr, p1.x, p2.x, p1.y, p2.y, p1.z, p2.z); }
	/// Set ranges for automatic variables
	inline void SetAutoRanges(double x1, double x2, double y1=0, double y2=0, double z1=0, double z2=0, double c1=0, double c2=0)
	{ mgl_set_auto_ranges(gr, x1, x2, y1, y2, z1, z2, c1, c2); }
	/// Set ranges for automatic variables
	inline void SetAutoRanges(mglPoint p1, mglPoint p2)
	{ mgl_set_auto_ranges(gr, p1.x, p2.x, p1.y, p2.y, p1.z, p2.z, p1.c, p2.c); }
	/// Set axis origin
	inline void SetOrigin(mglPoint p)
	{ mgl_set_origin(gr, p.x, p.y, p.z); }
	/// Set axis origin by coordinates (z0 defaults to NaN)
	inline void SetOrigin(double x0, double y0, double z0=mglNaN)
	{ mgl_set_origin(gr, x0, y0, z0); }
	/// Set the transformation formulas for coordinate. Use "" or NULL for built-in ones
	inline void SetFunc(const char *EqX, const char *EqY, const char *EqZ=NULL, const char *EqA=NULL)
	{ mgl_set_func(gr, EqX, EqY, EqZ, EqA); }
	/// Set one of predefined transformation rule
	inline void SetCoor(int how) { mgl_set_coor(gr, how); }
	/// Set to draw Ternary axis (triangle like axis, grid and so on)
	/** val=1 for Ternary axis (a+b+c=1, z=z),
	 * val=2 for Quaternary axis (a+b+c+d=1),
	 * val|4 for projections. */
	inline void Ternary(int val) { mgl_set_ternary(gr, val); }
	/// Set to use or not tick labels rotation
	inline void SetTickRotate(bool val) { mgl_set_tick_rotate(gr,val); }
	/// Set to use or not tick labels skipping
	inline void SetTickSkip(bool val) { mgl_set_tick_skip(gr,val); }
	/// Set tick length
	inline void SetTickLen(double len, double stt=1)
	{ mgl_set_tick_len(gr, len, stt); }
	/// Set axis and ticks style
	inline void SetAxisStl(const char *stl="k", const char *tck=0, const char *sub=0)
	{ mgl_set_axis_stl(gr, stl, tck, sub); }
	/// Set time templates for ticks
	inline void SetTicksTime(char dir, double d=0, const char *t="")
	{ mgl_set_ticks_time(gr,dir,d,t); }
	/// Set ticks text (\n separated). Use "" to disable this feature.
	inline void SetTicksVal(char dir, const char *lbl, bool add=false)
	{ mgl_set_ticks_str(gr,dir,lbl,add); }
	/// Set ticks text (\n separated), wide-character variant.
	inline void SetTicksVal(char dir, const wchar_t *lbl, bool add=false)
	{ mgl_set_ticks_wcs(gr,dir,lbl,add); }
	/// Set ticks position and text (\n separated). Use "" to disable this feature.
	inline void SetTicksVal(char dir, const mglDataA &v, const char *lbl, bool add=false)
	{ mgl_set_ticks_val(gr,dir,&v,lbl,add); }
	/// Set ticks position and text (\n separated), wide-character variant.
	inline void SetTicksVal(char dir, const mglDataA &v, const wchar_t *lbl, bool add=false)
	{ mgl_set_ticks_valw(gr,dir,&v,lbl,add); }
	/// Add manual tick at given position. Use "" to disable this feature.
	inline void AddTick(char dir, double val, const char *lbl)
	{ mgl_add_tick(gr,dir,val,lbl); }
	/// Add manual tick at given position, wide-character variant.
	inline void AddTick(char dir, double val, const wchar_t *lbl)
	{ mgl_add_tickw(gr,dir,val,lbl); }
	/// Set the ticks parameters and string for its factor
	inline void SetTicks(char dir, double d=0, int ns=0, double org=mglNaN, const char *factor="")
	{ mgl_set_ticks_fact(gr, dir, d, ns, org, factor); }
	/// Set the ticks parameters and string for its factor, wide-character variant.
	inline void SetTicks(char dir, double d, int ns, double org, const wchar_t *factor)
	{ mgl_set_ticks_factw(gr, dir, d, ns, org, factor); }
	/// Auto adjust ticks
	inline void Adjust(const char *dir="xyzc")
	{ mgl_adjust_ticks(gr, dir); }
	/// Set templates for ticks
	inline void SetTickTempl(char dir, const char *t)
	{ mgl_set_tick_templ(gr,dir,t); }
	/// Set templates for ticks, wide-character variant.
	inline void SetTickTempl(char dir, const wchar_t *t)
	{ mgl_set_tick_templw(gr,dir,t); }
	/// Tune ticks (tune|1 for common multiplier, tune|2 for common component)
	inline void SetTuneTicks(int tune, double fact_pos=1.15)
	{ mgl_tune_ticks(gr, tune, fact_pos); }
	/// Set additional shift of tick labels
	inline void SetTickShift(mglPoint p)
	{ mgl_set_tick_shift(gr,p.x,p.y,p.z,p.c); }
	/// Set to use UTC time instead of local time
	inline void SetTimeUTC(bool enable)
	{ mgl_set_flag(gr,enable, MGL_USE_GMTIME); }
	/// Set to draw tick labels at axis origin
	inline void SetOriginTick(bool enable=true)
	{ mgl_set_flag(gr,!enable, MGL_NO_ORIGIN); }
	/// Set bit-value flag of HMGL state (for advanced users only)
	inline void SetFlagAdv(int val, uint32_t flag)
	{ mgl_set_flag(gr, val, flag); }
/// Put further plotting in m-th cell of nx*ny grid of the image.
/** String \a style may contain:
* '<' for reserving space at left
* '>' for reserving space at right
* '^' for reserving space at top
* '_' for reserving space at bottom
* '#' for using whole region. */
inline void SubPlot(int nx,int ny,int m,const char *style="<>_^", double dx=0, double dy=0)
{ mgl_subplot_d(gr, nx, ny, m, style, dx, dy); }
/// Put further plotting in rectangle of dx*dy cells starting from m-th cell of nx*ny grid of the image and shift it by distance {sx,sy}.
/** String \a style may contain:
* '<' for reserving space at left
* '>' for reserving space at right
* '^' for reserving space at top
* '_' for reserving space at bottom
* '#' for using whole region. */
inline void MultiPlot(int nx,int ny,int m, int dx, int dy, const char *style="<>_^", double sx=0, double sy=0)
{ mgl_multiplot_d(gr, nx, ny, m, dx, dy, style, sx, sy); }
/// Put further plotting in a region [x1,x2]*[y1,y2] of the image or subplot (x1,x2,y1,y2 in range [0, 1]).
inline void InPlot(double x1,double x2,double y1,double y2, bool rel=true)
{ if(rel) mgl_relplot(gr, x1, x2, y1, y2);
else mgl_inplot(gr, x1, x2, y1, y2); }
/// Put further plotting in column cell of previous subplot
inline void ColumnPlot(int num, int ind, double d=0)
{ mgl_columnplot(gr,num,ind,d); }
/// Put further plotting in matrix cell of previous subplot
inline void GridPlot(int nx, int ny, int ind, double d=0)
{ mgl_gridplot(gr,nx,ny,ind,d); }
/// Put further plotting in cell of stick rotated on angles tet, phi
inline void StickPlot(int num, int i, double tet, double phi)
{ mgl_stickplot(gr,num,i,tet,phi); }
/// Put further plotting in cell of stick sheared on sx, sy.
inline void ShearPlot(int num, int i, mreal sx, mreal sy, mreal xd=1, mreal yd=0)
{ mgl_shearplot(gr,num,i,sx,sy,xd,yd); }
/// Set factor of plot size
inline void SetPlotFactor(double val)
{ mgl_set_plotfactor(gr,val); }
/// Push transformation matrix into stack
inline void Push() { mgl_mat_push(gr); }
/// Pop transformation matrix from stack
inline void Pop() { mgl_mat_pop(gr); }
/// Add title for current subplot/inplot
/** Style '#' draw box around the title. */
inline void Title(const char *title,const char *stl="",double size=-2)
{ mgl_title(gr,title,stl,size); }
/// Add title for current subplot/inplot
/** Style '#' draw box around the title. */
inline void Title(const wchar_t *title,const char *stl="",double size=-2)
{ mgl_titlew(gr,title,stl,size); }
/// Set aspect ratio for further plotting.
inline void Aspect(double Ax,double Ay,double Az=1)
{ mgl_aspect(gr, Ax, Ay, Az); }
/// Shear a further plotting.
inline void Shear(double Sx,double Sy)
{ mgl_shear(gr, Sx, Sy); }
/// Rotate a further plotting.
inline void Rotate(double TetX,double TetZ=0,double TetY=0)
{ mgl_rotate(gr, TetX, TetZ, TetY); }
/// Rotate a further plotting around vector {x,y,z}.
inline void RotateN(double Tet,double x,double y,double z)
{ mgl_rotate_vector(gr, Tet, x, y, z); }
/// Set perspective (in range [0,1)) for plot. Set to zero for switching off.
inline void Perspective(double val)
{ mgl_perspective(gr, val); }
/// Set angle of view independently from Rotate().
inline void View(double TetX,double TetZ=0,double TetY=0)
{ mgl_view(gr, TetX, TetZ, TetY); }
/// Set angle of view independently from Rotate().
inline void ViewAsRotate(double TetZ,double TetX,double TetY=0)
{ mgl_view(gr, -TetX, -TetZ, -TetY); }
/// Zoom in/out a part of picture (use Zoom(0, 0, 1, 1) for restore default)
inline void Zoom(double x1, double y1, double x2, double y2)
{ mgl_zoom(gr, x1, y1, x2, y2); }
/// Set size of frame in pixels. Normally this function is called internally.
inline void SetSize(int width, int height, bool clf=true)
{ if(clf) mgl_set_size(gr, width, height);
else mgl_scale_size(gr, width, height); }
/// Scaling for all further set size calls.
static inline void SetSizeScl(double scl) { mgl_set_size_scl(scl); }
/// Set plot quality
/** qual=0 -- no face drawing (fastest),
* qual=1 -- no color interpolation (fast),
* qual=2 -- high quality (normal),
* qual|4 -- direct bitmap drawing (low memory usage);
* qual|8 for dots drawing instead of primitives (extremely fast). */
inline void SetQuality(int qual=MGL_DRAW_NORM) { mgl_set_quality(gr, qual); }
/// Get plot quality
inline int GetQuality() { return mgl_get_quality(gr); }
/// Set drawing region for Quality&4
inline void SetDrawReg(long nx=1, long ny=1, long m=0) { mgl_set_draw_reg(gr,nx,ny,m); }
/// Start group of objects
inline void StartGroup(const char *name) { mgl_start_group(gr, name); }
/// End group of objects
inline void EndGroup() { mgl_end_group(gr); }
/// Highlight objects with given id
inline void Highlight(int id) { mgl_highlight(gr, id); }
/// Set boundary box for export graphics into 2D file formats.
/** If x2<0 (y2<0) then full width (height) will be used.
* If x1<0 or y1<0 or x1>=x2|Width or y1>=y2|Height then cropping will be disabled. */
inline void SetBBox(int x1=0, int y1=0, int x2=-1, int y2=-1)
{ mgl_set_bbox(gr,x1,y1,x2,y2); }
/// Show current image
/** The image is displayed with the external program \a viewer.
* NOTE(review): exact semantics of \a keep are backend-defined — confirm against mgl_show_image. */
inline void ShowImage(const char *viewer, bool keep=0)
{ mgl_show_image(gr, viewer, keep); }
/// Write the frame in file (depending extension, write current frame if fname is empty)
inline void WriteFrame(const char *fname=0,const char *descr="")
{ mgl_write_frame(gr, fname, descr); }
/// Write the frame in file using JPEG format
inline void WriteJPEG(const char *fname,const char *descr="")
{ mgl_write_jpg(gr, fname, descr); }
/// Write the frame in file using PNG format with transparency
inline void WritePNG(const char *fname,const char *descr="", bool alpha=true)
{ if(alpha) mgl_write_png(gr, fname, descr);
else mgl_write_png_solid(gr, fname, descr); }
/// Write the frame in file using BMP format
inline void WriteBMP(const char *fname,const char *descr="")
{ mgl_write_bmp(gr, fname, descr); }
/// Write the frame in file using TGA format
inline void WriteTGA(const char *fname,const char *descr="")
{ mgl_write_tga(gr, fname, descr); }
/// Write the frame in file using PostScript format
inline void WriteEPS(const char *fname,const char *descr="")
{ mgl_write_eps(gr, fname, descr); }
/// Write the frame in file using LaTeX format
inline void WriteTEX(const char *fname,const char *descr="")
{ mgl_write_tex(gr, fname, descr); }
/// Write the frame in file using PostScript format as bitmap
inline void WriteBPS(const char *fname,const char *descr="")
{ mgl_write_bps(gr, fname, descr); }
/// Write the frame in file using SVG format
inline void WriteSVG(const char *fname,const char *descr="")
{ mgl_write_svg(gr, fname, descr); }
/// Write the frame in file using GIF format (only for current frame!)
inline void WriteGIF(const char *fname,const char *descr="")
{ mgl_write_gif(gr, fname, descr); }
/// Write the frame in file using OBJ format
inline void WriteOBJ(const char *fname,const char *descr="",bool use_png=true)
{ mgl_write_obj(gr, fname, descr, use_png); }
/// Write the frame in file using OBJ format - Balakin way
inline void WriteOBJold(const char *fname,const char *descr="",bool use_png=true)
{ mgl_write_obj_old(gr, fname, descr, use_png); }
/// Write the frame in file using XYZ format
inline void WriteXYZ(const char *fname,const char *descr="")
{ mgl_write_xyz(gr, fname, descr); }
/// Write the frame in file using STL format (faces only)
inline void WriteSTL(const char *fname,const char *descr="")
{ mgl_write_stl(gr, fname, descr); }
/// Write the frame in file using OFF format
inline void WriteOFF(const char *fname,const char *descr="", bool colored=false)
{ mgl_write_off(gr, fname, descr,colored); }
// /// Write the frame in file using X3D format
// inline void WriteX3D(const char *fname,const char *descr="")
// { mgl_write_x3d(gr, fname, descr); }
/// Write the frame in file using PRC format
/** NOTE(review): \a make_pdf presumably also produces a PDF with the embedded 3D model — confirm against mgl_write_prc. */
inline void WritePRC(const char *fname,const char *descr="",bool make_pdf=true)
{ mgl_write_prc(gr, fname, descr, make_pdf); }
/// Export in JSON format suitable for later drawing by JavaScript
inline void WriteJSON(const char *fname,const char *descr="",bool force_z=false)
{ if(force_z) mgl_write_json_z(gr, fname, descr);
else mgl_write_json(gr, fname, descr); }
/// Return string of JSON data suitable for later drawing by JavaScript
/** NOTE(review): returned buffer is owned by the library — do not free; confirm lifetime against mgl_get_json. */
inline const char *GetJSON() { return mgl_get_json(gr); }
/// Force preparing the image. It can be useful for OpenGL mode mostly.
inline void Finish() { mgl_finish(gr); }
/// Create new frame.
inline void NewFrame() { mgl_new_frame(gr); }
/// Finish frame drawing
inline void EndFrame() { mgl_end_frame(gr); }
/// Get the number of created frames
inline int GetNumFrame() { return mgl_get_num_frame(gr); }
/// Reset frames counter (start it from zero)
inline void ResetFrames() { mgl_reset_frames(gr); }
/// Delete primitives for i-th frame (work if MGL_VECT_FRAME is set on)
inline void DelFrame(int i) { mgl_del_frame(gr, i); }
/// Get drawing data for i-th frame (work if MGL_VECT_FRAME is set on)
inline void GetFrame(int i) { mgl_get_frame(gr, i); }
/// Set drawing data for i-th frame (work if MGL_VECT_FRAME is set on). Work as EndFrame() but don't add frame to GIF image.
inline void SetFrame(int i) { mgl_set_frame(gr, i); }
/// Append drawing data from i-th frame (work if MGL_VECT_FRAME is set on)
inline void ShowFrame(int i){ mgl_show_frame(gr, i); }
/// Clear list of primitives for current drawing
inline void ClearFrame() { mgl_clear_frame(gr); }
/// Start write frames to cinema using GIF format
/** \a ms is the delay between frames in milliseconds. */
inline void StartGIF(const char *fname, int ms=100)
{ mgl_start_gif(gr, fname,ms); }
/// Stop writing cinema using GIF format
inline void CloseGIF() { mgl_close_gif(gr); }
/// Export points and primitives in file using MGLD format
inline void ExportMGLD(const char *fname, const char *descr=0)
{ mgl_export_mgld(gr, fname, descr); }
/// Import points and primitives from file using MGLD format
/** If \a add is true the imported primitives are appended to the current ones. */
inline void ImportMGLD(const char *fname, bool add=false)
{ mgl_import_mgld(gr, fname, add); }
/// Copy RGB values into array which is allocated by user
/** Position of element {i,j} is [3*i + 3*Width*j]. */
inline bool GetRGB(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=3*w*h) memcpy(imgdata, mgl_get_rgb(gr),3*w*h);
return imglen>=3*w*h;
}
/// Get RGB values of current bitmap
/** Position of element {i,j} is [3*i + 3*Width*j].
* NOTE(review): returned buffer is owned by the canvas — do not free; confirm validity after further drawing. */
inline const unsigned char *GetRGB() { return mgl_get_rgb(gr); }
/// Copy RGBA values into array which is allocated by user
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline bool GetRGBA(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=4*w*h) memcpy(imgdata, mgl_get_rgba(gr),4*w*h);
return imglen>=4*w*h;
}
/// Get RGBA values of current bitmap
/** Position of element {i,j} is [4*i + 4*Width*j].
* NOTE(review): returned buffer is owned by the canvas — do not free; confirm validity after further drawing. */
inline const unsigned char *GetRGBA() { return mgl_get_rgba(gr); }
/// Copy BGRN values into array which is allocated by user
inline bool GetBGRN(unsigned char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr), i;
const unsigned char *buf=mgl_get_rgb(gr);
if(imglen>=4*w*h) for(i=0;i<w*h;i++)
{
imgdata[4*i] = buf[3*i+2];
imgdata[4*i+1] = buf[3*i+1];
imgdata[4*i+2] = buf[3*i];
imgdata[4*i+3] = 255;
}
return imglen>=4*w*h;
}
/// Copy RGBA values of background image into array which is allocated by user
/** Position of element {i,j} is [4*i + 4*Width*j]. */
inline bool GetBackground(char *imgdata, int imglen)
{
long w=mgl_get_width(gr), h=mgl_get_height(gr);
if(imglen>=4*w*h) memcpy(imgdata, mgl_get_background(gr),4*w*h);
return imglen>=4*w*h;
}
/// Get RGBA values of background image
/** Position of element {i,j} is [4*i + 4*Width*j].
* NOTE(review): returned buffer is owned by the canvas — do not free. */
inline const unsigned char *GetBackground() { return mgl_get_background(gr); }
/// Get width of the image in pixels
inline int GetWidth() { return mgl_get_width(gr); }
/// Get height of the image in pixels
inline int GetHeight() { return mgl_get_height(gr);}
/// Calculate 3D coordinate {x,y,z} for screen point {xs,ys}
/** The inverse of CalcScr(): maps a pixel position back into plot coordinates. */
inline mglPoint CalcXYZ(int xs, int ys)
{
mreal px = 0, py = 0, pz = 0;
mgl_calc_xyz(gr, xs, ys, &px, &py, &pz);
return mglPoint(px, py, pz);
}
/// Calculate screen point {xs,ys} for 3D coordinate {x,y,z}
/** The inverse of CalcXYZ(): projects a plot coordinate onto the screen. */
inline mglPoint CalcScr(mglPoint p)
{
int sx = 0, sy = 0;
mgl_calc_scr(gr, p.x, p.y, p.z, &sx, &sy);
return mglPoint(sx, sy);
}
/// Set object/subplot id
inline void SetObjId(int id) { mgl_set_obj_id(gr,id); }
/// Get object id
inline int GetObjId(long x,long y) { return mgl_get_obj_id(gr,x,y); }
/// Get subplot id
inline int GetSplId(long x,long y) { return mgl_get_spl_id(gr,x,y); }
/// Check if {\a xs,\a ys} is close to active point with accuracy d, and return its position or -1
inline long IsActive(int xs, int ys, int d=1) { return mgl_is_active(gr,xs,ys,d); }
/// Combine plots from 2 canvases. Result will be saved into this
inline void Combine(const mglGraph *g) { mgl_combine_gr(gr,g->gr); }
/// Clear up the frame and fill background by specified color
/** NOTE(review): color components presumably in range [0,1] — confirm against mgl_clf_rgb. */
inline void Clf(double r, double g, double b) { mgl_clf_rgb(gr, r, g, b); }
/// Clear up the frame and fill background by specified color with manual transparency
inline void Clf(const char *col) { mgl_clf_str(gr, col); }
/// Clear up the frame and fill background by specified color
inline void Clf(char col) { mgl_clf_chr(gr, col); }
/// Clear up the frame
inline void Clf() { mgl_clf(gr); }
/// Clear unused points and primitives. Useful only in combination with SetFaceNum().
inline void ClearUnused() { mgl_clear_unused(gr); }
/// Load background image
inline void LoadBackground(const char *fname, double alpha=1)
{ mgl_load_background(gr,fname,alpha); }
/// Force drawing the image and use it as background one
inline void Rasterize() { mgl_rasterize(gr); }
/// Draws the point (ball) at position {x,y,z} with color c
inline void Ball(mglPoint p, char c='r')
{
// Build the 2-character marker spec: '.' (dot marker) followed by the color.
char spec[3];
spec[0] = '.'; spec[1] = c; spec[2] = '\0';
mgl_mark(gr, p.x, p.y, p.z, spec);
}
/// Draws the mark at position p
inline void Mark(mglPoint p, const char *mark)
{ mgl_mark(gr, p.x, p.y, p.z, mark); }
/// Draws the line between points by specified pen
/** Large \a n (for example, n=100) should be used for geodesic line in curved coordinates */
inline void Line(mglPoint p1, mglPoint p2, const char *pen="B",int n=2)
{ mgl_line(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, pen, n); }
/// Draws the spline curve between points by specified pen
inline void Curve(mglPoint p1, mglPoint d1, mglPoint p2, mglPoint d2, const char *pen="B", int n=100)
{ mgl_curve(gr, p1.x, p1.y, p1.z, d1.x, d1.y, d1.z, p2.x, p2.y, p2.z, d2.x, d2.y, d2.z, pen, n); }
/// Draws the 3d error box e for point p
inline void Error(mglPoint p, mglPoint e, const char *pen="k")
{ mgl_error_box(gr, p.x, p.y, p.z, e.x, e.y, e.z, pen); }
/// Draws Lamerey diagram for mapping x_new = f(x_old)
/** String \a stl may contain: ‘v’ for drawing arrows; ‘~’ for disable 1st segment.
* Option value set the number of segments (default is 20).*/
inline void Lamerey(double x0, const mglDataA &f, const char *stl="", const char *opt="")
{ mgl_lamerey_dat(gr,x0,&f,stl,opt); }
/// Same as above, but the mapping is given by textual formula \a func
inline void Lamerey(double x0, const char *func, const char *stl="", const char *opt="")
{ mgl_lamerey_str(gr,x0,func,stl,opt); }
/// Draws Bifurcation diagram for mapping x_new = f(x_old) in x-axis range
/** Option value set the number of stationary points (default is 1024).*/
inline void Bifurcation(double dx, const mglDataA &f, const char *stl="", const char *opt="")
{ mgl_bifurcation_dat(gr,dx,&f,stl,opt); }
/// Same as above, but the mapping is given by textual formula \a func
inline void Bifurcation(double dx, const char *func, const char *stl="", const char *opt="")
{ mgl_bifurcation_str(gr,dx,func,stl,opt); }
/// Draws Iris plots for determining cross-dependences of data arrays
/** NOTE: using the same ranges and empty ids will not draw axis. This will add data to existing Iris plot.
* Option value set the size of data labels ids, separated by ';'.*/
inline void Iris(mglDataA &dats, const char *ids, const char *stl="", const char *opt="")
{ mgl_iris_1(gr,&dats,ids,stl,opt); }
/// Same as above, but with wide-character labels \a ids
inline void Iris(mglDataA &dats, const wchar_t *ids, const char *stl="", const char *opt="")
{ mgl_irisw_1(gr,&dats,ids,stl,opt); }
/// Same as above, but with manual \a ranges for the data arrays
inline void Iris(mglDataA &dats, mglDataA &ranges, const char *ids, const char *stl="", const char *opt="")
{ mgl_iris(gr,&dats,&ranges,ids,stl,opt); }
/// Same as above, but with manual \a ranges and wide-character labels \a ids
inline void Iris(mglDataA &dats, mglDataA &ranges, const wchar_t *ids, const char *stl="", const char *opt="")
{ mgl_irisw(gr,&dats,&ranges,ids,stl,opt); }
/// Draws the face between points with color stl (include interpolation up to 4 colors).
inline void Face(mglPoint p1, mglPoint p2, mglPoint p3, mglPoint p4, const char *stl="r")
{ mgl_face(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, p3.x, p3.y, p3.z, p4.x, p4.y, p4.z, stl); }
/// Draws the face in y-z plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceX(mglPoint p, double wy, double wz, const char *stl="w", double dx=0, double dy=0)
{ mgl_facex(gr, p.x, p.y, p.z, wy, wz, stl, dx, dy); }
/// Draws the face in x-z plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceY(mglPoint p, double wx, double wz, const char *stl="w", double dx=0, double dy=0)
{ mgl_facey(gr, p.x, p.y, p.z, wx, wz, stl, dx, dy); }
/// Draws the face in x-y plane at point p with color stl (include interpolation up to 4 colors).
inline void FaceZ(mglPoint p, double wx, double wy, const char *stl="w", double dx=0, double dy=0)
{ mgl_facez(gr, p.x, p.y, p.z, wx, wy, stl, dx, dy); }
/// Draws the drop at point p in direction d with color col and radius r
/** Parameter \a shift set the degree of drop oblongness: ‘0’ is sphere, ‘1’ is maximally oblongness drop. Parameter \a ap set relative width of the drop (this is analogue of “ellipticity” for the sphere).*/
inline void Drop(mglPoint p, mglPoint d, double r, const char *col="r", double shift=1, double ap=1)
{ mgl_drop(gr, p.x, p.y, p.z, d.x, d.y, d.z, r, col, shift, ap); }
/// Draws the sphere at point p with color col and radius r
inline void Sphere(mglPoint p, double r, const char *col="r")
{ mgl_sphere(gr, p.x, p.y, p.z, r, col); }
/// Draws the cone between points p1,p2 with radius r1,r2 and with style stl
/** Parameter \a stl can contain:
* ‘@’ for drawing edges;
* ‘#’ for wired cones;
* ‘t’ for drawing tubes/cylinder instead of cones/prisms;
* ‘4’, ‘6’, ‘8’ for drawing square, hex- or octo-prism instead of cones.*/
inline void Cone(mglPoint p1, mglPoint p2, double r1, double r2=-1, const char *stl="r@")
{ mgl_cone(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z,r1,r2,stl); }
/// Draws the ellipse between points p1,p2 with color stl and width r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Ellipse(mglPoint p1, mglPoint p2, double r, const char *stl="r")
{ mgl_ellipse(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, r,stl); }
/// Draws the circle at point p with color stl and radius r
/** Implemented as a degenerate ellipse whose both focal points coincide with p.
* Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Circle(mglPoint p, double r, const char *stl="r")
{ mgl_ellipse(gr, p.x, p.y, p.z, p.x, p.y, p.z, r,stl); }
/// Draws the rhomb between points p1,p2 with color stl and width r
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Rhomb(mglPoint p1, mglPoint p2, double r, const char *stl="r")
{ mgl_rhomb(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, r,stl); }
/// Draws the polygon based on points p1,p2 with color stl
/** Parameter \a stl can contain:
* ‘#’ for wired figure (boundary only);
* ‘@’ for filled figure and with boundary (second color or black one is used for boundary).*/
inline void Polygon(mglPoint p1, mglPoint p2, int n, const char *stl="r")
{ mgl_polygon(gr, p1.x, p1.y, p1.z, p2.x, p2.y, p2.z, n,stl); }
/// Draws the arc around axis pr with center at p0 and starting from p1, by color stl and angle a (in degrees)
inline void Arc(mglPoint p0, mglPoint pa, mglPoint p1, double a, const char *stl="r")
{ mgl_arc_ext(gr, p0.x,p0.y,p0.z, pa.x,pa.y,pa.z, p1.x,p1.y,p1.z, a,stl); }
/// Draws the arc around axis 'z' with center at p0 and starting from p1, by color stl and angle a (in degrees)
/** NOTE: p1's z-coordinate is intentionally replaced by p0.z so the arc lies in the z-plane of the center. */
inline void Arc(mglPoint p0, mglPoint p1, double a, const char *stl="r")
{ mgl_arc_ext(gr, p0.x,p0.y,p0.z, 0,0,1, p1.x,p1.y,p0.z, a,stl); }
/// Draws bitmap (logo) which is stretched along whole axis range
inline void Logo(long w, long h, const unsigned char *rgba, bool smooth=false, const char *opt="")
{ mgl_logo(gr, w, h, rgba, smooth, opt); }
/// Same as above, but the bitmap is read from file \a fname
inline void Logo(const char *fname, bool smooth=false, const char *opt="")
{ mgl_logo_file(gr, fname, smooth, opt); }
/// Draw user-defined symbol in position p
inline void Symbol(mglPoint p, char id, const char *how="", double size=-1)
{ mgl_symbol(gr, p.x, p.y, p.z, id, how, size); }
/// Draw user-defined symbol in position p along direction d
inline void Symbol(mglPoint p, mglPoint d, char id, const char *how="", double size=-1)
{ mgl_symbol_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, id, how, size); }
/// Print text in position p with specified font
inline void Putsw(mglPoint p,const wchar_t *text,const char *font=":C",double size=-1)
{ mgl_putsw(gr, p.x, p.y, p.z, text, font, size); }
/// Print text in position p with specified font
inline void Puts(mglPoint p,const char *text,const char *font=":C",double size=-1)
{ mgl_puts(gr, p.x, p.y, p.z, text, font, size); }
/// Print text in position {x,y} (z=0 is used) with specified font
inline void Putsw(double x, double y,const wchar_t *text,const char *font=":AC",double size=-1)
{ mgl_putsw(gr, x, y, 0, text, font, size); }
/// Print text in position {x,y} (z=0 is used) with specified font
inline void Puts(double x, double y,const char *text,const char *font=":AC",double size=-1)
{ mgl_puts(gr, x, y, 0, text, font, size); }
/// Print text in position p along direction d with specified font
inline void Putsw(mglPoint p, mglPoint d, const wchar_t *text, const char *font=":L", double size=-1)
{ mgl_putsw_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, text, font, size); }
/// Print text in position p along direction d with specified font
inline void Puts(mglPoint p, mglPoint d, const char *text, const char *font=":L", double size=-1)
{ mgl_puts_dir(gr, p.x, p.y, p.z, d.x, d.y, d.z, text, font, size); }
/// Print text along the curve {x,y,z}
inline void Text(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *text, const char *font="", const char *opt="")
{ mgl_text_xyz(gr, &x, &y, &z, text, font, opt); }
/// Print text along the curve {x,y}
inline void Text(const mglDataA &x, const mglDataA &y, const char *text, const char *font="", const char *opt="")
{ mgl_text_xy(gr, &x, &y, text, font, opt); }
/// Print text along the curve y(x) with x in x-axis range
inline void Text(const mglDataA &y, const char *text, const char *font="", const char *opt="")
{ mgl_text_y(gr, &y, text, font, opt); }
/// Print wide-character text along the curve {x,y,z}
inline void Text(const mglDataA &x, const mglDataA &y, const mglDataA &z, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_xyz(gr, &x, &y, &z, text, font, opt); }
/// Print wide-character text along the curve {x,y}
inline void Text(const mglDataA &x, const mglDataA &y, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_xy(gr, &x, &y, text, font, opt); }
/// Print wide-character text along the curve y(x) with x in x-axis range
inline void Text(const mglDataA &y, const wchar_t *text, const char *font="", const char *opt="")
{ mgl_textw_y(gr, &y, text, font, opt); }
/// Draws bounding box outside the plotting volume with color c.
/** Style ‘@’ produce filled back faces. */
inline void Box(const char *col="", bool ticks=true)
{ mgl_box_str(gr, col, ticks); }
/// Draw axises with ticks in direction(s) dir.
/** Parameter \a dir may contain:
* ‘xyzt’for drawing axis in corresponding direction;
* ‘XYZT’ for drawing axis in corresponding direction but with inverted positions of labels;
* ‘~’, ‘_’ for disabling tick labels;
* ‘U’ for disabling rotation of tick labels;
* ‘^’ for inverting default axis origin;
* ‘!’ for disabling ticks tuning;
* ‘AKDTVISO’ for drawing arrow at the end of axis;
* ‘a’ for forced adjusting of axis ticks;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.
* Option "value" set the manual rotation angle for the ticks. */
inline void Axis(const char *dir="xyzt", const char *stl="", const char *opt="")
{ mgl_axis(gr, dir,stl,opt); }
/// Draw grid lines perpendicular to direction(s) dir.
inline void Grid(const char *dir="xyzt",const char *pen="B", const char *opt="")
{ mgl_axis_grid(gr, dir, pen, opt); }
/// Print the label text for axis dir.
/** Option "value" set additional shifting of the label. */
inline void Label(char dir, const char *text, double pos=+1, const char *opt="")
{ mgl_label(gr, dir, text, pos, opt); }
/// Print the label text for axis dir.
/** Option "value" set additional shifting of the label. */
inline void Label(char dir, const wchar_t *text, double pos=+1, const char *opt="")
{ mgl_labelw(gr, dir, text, pos, opt); }
/// Draw colorbar at edge of axis
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const char *sch="")
{ mgl_colorbar(gr, sch); }
/// Draw colorbar at manual position
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const char *sch,double x,double y,double w=1,double h=1)
{ mgl_colorbar_ext(gr, sch, x,y,w,h); }
/// Draw colorbar with manual colors at edge of axis
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const mglDataA &val, const char *sch="")
{ mgl_colorbar_val(gr, &val, sch); }
/// Draw colorbar with manual colors at manual position
/** Parameter \a sch may contain:
* ‘<>^_’ for positioning at left, at right, at top or at bottom correspondingly;
* ‘I’ for positioning near bounding (by default, at edges of subplot);
* ‘A’ for using absolute coordinates;
* ‘~’ for disabling tick labels.
* ‘!’ for disabling ticks tuning;
* ‘f’ for printing ticks labels in fixed format;
* ‘E’ for using ‘E’ instead of ‘e’ in ticks labels;
* ‘F’ for printing ticks labels in LaTeX format;
* ‘+’ for printing ‘+’ for positive ticks;
* ‘-’ for printing usual ‘-’ in ticks labels;
* ‘0123456789’ for precision at printing ticks labels.*/
inline void Colorbar(const mglDataA &val, const char *sch,double x,double y,double w=1,double h=1)
{ mgl_colorbar_val_ext(gr, &val, sch, x,y,w,h); }
/// Add string to legend
inline void AddLegend(const char *text,const char *style)
{ mgl_add_legend(gr, text, style); }
/// Add wide-character string to legend
inline void AddLegend(const wchar_t *text,const char *style)
{ mgl_add_legendw(gr, text, style); }
/// Clear saved legend string
inline void ClearLegend()
{ mgl_clear_legend(gr); }
/// Draw legend of accumulated strings at position {x,y}
/** Parameter fnt may contain:
* font style for legend text;
* colors for background (first one), border (second one) and text (last one);
* ‘A’ for positioning in absolute coordinates;
* ‘^’ for positioning outside of specified point;
* ‘-’ for arranging entries horizontally;
* ‘#’ for drawing box around legend.
* Option value set the space between line samples and text (default is 0.1).*/
inline void Legend(double x, double y, const char *font="#", const char *opt="")
{ mgl_legend_pos(gr, x, y, font, opt); }
/// Draw legend of accumulated strings
/** Parameter fnt may contain:
* font style for legend text;
* colors for background (first one), border (second one) and text (last one);
* ‘A’ for positioning in absolute coordinates;
* ‘^’ for positioning outside of specified point;
* ‘-’ for arranging entries horizontally;
* ‘#’ for drawing box around legend.
* Option value set the space between line samples and text (default is 0.1).
* Parameter \a where sets position: 0 at bottom-left, 1 at bottom-right, 2 at top-left, 3 at top-right (default).*/
inline void Legend(int where=3, const char *font="#", const char *opt="")
{ mgl_legend(gr, where, font, opt); }
/// Set number of marks in legend sample
inline void SetLegendMarks(int num) { mgl_set_legend_marks(gr, num); }
/// Draw usual curve {x,y,z}
inline void Plot(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_plot_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw usual curve {x,y}
inline void Plot(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_plot_xy(gr, &x, &y, pen,opt); }
/// Draw usual curve {x,y} with x in x-axis range
inline void Plot(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_plot(gr, &y, pen,opt); }
/// Draw tapes which rotates as (bi-)normales of curve {x,y,z}
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_tape_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw tapes which rotates as (bi-)normales of curve {x,y}
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_tape_xy(gr, &x, &y, pen,opt); }
/// Draw tapes which rotates as (bi-)normales of curve {x,y} with x in x-axis range
/** The width of tape is proportional to barwidth and can be changed by option "value".*/
inline void Tape(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_tape(gr, &y, pen,opt); }
/// Draw radar chart (plot in curved coordinates)
/** Option "value" set the additional shift of data (i.e. the data a+value is used instead of a).*/
inline void Radar(const mglDataA &a, const char *pen="", const char *opt="")
{ mgl_radar(gr, &a, pen, opt); }
/// Draw stairs for points in arrays {x,y,z}
inline void Step(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_step_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw stairs for points in arrays {x,y}
inline void Step(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_step_xy(gr, &x, &y, pen, opt); }
/// Draw stairs for points in arrays {x,y} with x in x-axis range
inline void Step(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_step(gr, &y, pen, opt); }
/// Draw curve {x,y,z} which is colored by c (like tension plot)
inline void Tens(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens_xyz(gr, &x, &y, &z, &c, pen, opt); }
/// Draw curve {x,y} which is colored by c (like tension plot)
inline void Tens(const mglDataA &x, const mglDataA &y, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens_xy(gr, &x, &y, &c, pen, opt); }
/// Draw curve {x,y} with x in x-axis range which is colored by c (like tension plot)
inline void Tens(const mglDataA &y, const mglDataA &c, const char *pen="", const char *opt="")
{ mgl_tens(gr, &y, &c, pen, opt); }
/// Fill area between curve {x,y,z} and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_area_xyz(gr, &x, &y, &z, pen, opt); }
/// Fill area between curve {x,y} and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_area_xy(gr, &x, &y, pen, opt); }
/// Fill area between curve {x,y} with x in x-axis range and axis plane
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Area(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_area(gr, &y, pen, opt); }
/// Fill area between curves {x,y1} and {x,y2} with x in x-axis range
/** Style 'i' will fill area only if y1 < y2.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region(gr, &y1, &y2, pen, opt); }
/// Fill area between curves {x,y1} and {x,y2}
/** Style 'i' will fill area only if y1 < y2.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region_xy(gr, &x, &y1, &y2, pen, opt); }
/// Fill area (draw ribbon) between curves {x1,y1,z1} and {x2,y2,z2}
/** Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x1, const mglDataA &y1, const mglDataA &z1, const mglDataA &x2, const mglDataA &y2, const mglDataA &z2, const char *pen="", const char *opt="")
{ mgl_region_3d(gr, &x1, &y1, &z1, &x2, &y2, &z2, pen, opt); }
/// Fill area (draw ribbon) between curves {x1,y1} and {x2,y2}
/** Implemented via the 3D variant with NULL z-arrays (i.e. default z values are used).
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Region(const mglDataA &x1, const mglDataA &y1, const mglDataA &x2, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_region_3d(gr, &x1, &y1, NULL, &x2, &y2, NULL, pen, opt); }
/// Draw vertical lines from points {x,y,z} to axis plane
inline void Stem(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_stem_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw vertical lines from points {x,y} to axis plane
inline void Stem(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_stem_xy(gr, &x, &y, pen, opt); }
/// Draw vertical lines from points {x,y} with x in x-axis range to axis plane
inline void Stem(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_stem(gr, &y, pen, opt); }
/// Draw vertical bars from points {x,y,z} to axis plane
/** String \a pen may contain:
* ‘a’ for drawing boxes one above another (like summation);
* ‘f’ for waterfall chart;
* ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_bars_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw vertical bars from points {x,y} to axis plane
/** String \a pen may contain:
* ‘a’ for drawing boxes one above another (like summation);
* ‘f’ for waterfall chart;
* ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_bars_xy(gr, &x, &y, pen, opt); }
/// Draw vertical bars from points {x,y} with x in x-axis range to axis plane
/** String \a pen may contain:
* ‘a’ for drawing boxes one above another (like summation);
* ‘f’ for waterfall chart;
* ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Bars(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_bars(gr, &y, pen, opt); }
/// Draw horizontal bars from points {x,y} to axis plane
/** String \a pen may contain:
* ‘a’ for drawing boxes one above another (like summation);
* ‘f’ for waterfall chart;
* ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Barh(const mglDataA &y, const mglDataA &v, const char *pen="", const char *opt="")
{ mgl_barh_yx(gr, &y, &v, pen, opt); }
/// Draw horizontal bars from points {x,y} with y in y-axis range to axis plane
/** String \a pen may contain:
* ‘a’ for drawing boxes one above another (like summation);
* ‘f’ for waterfall chart;
* ‘<’, ‘^’, ‘>’ for aligning boxes: at left, centered, at right.
* Gradient filling is used if number of specified colors is equal to 2*number of curves.*/
inline void Barh(const mglDataA &v, const char *pen="", const char *opt="")
{ mgl_barh(gr, &v, pen, opt); }
/// Draw chart for data a.
/** A space in \a colors denotes a transparent slice; style '#' draws black borders. */
inline void Chart(const mglDataA &a, const char *colors="", const char *opt="")
{ mgl_chart(gr, &a, colors,opt); }
/// Draw Open-High-Low-Close (OHLC) diagram at positions x.
/** Different colors are used for up and down values if the number of colors is 2*number of curves. */
inline void OHLC(const mglDataA &x, const mglDataA &open, const mglDataA &high, const mglDataA &low, const mglDataA &close, const char *pen="", const char *opt="")
{ mgl_ohlc_x(gr, &x, &open,&high,&low,&close,pen,opt); }
/// Draw Open-High-Low-Close (OHLC) diagram with x taken in the x-axis range.
/** Different colors are used for up and down values if the number of colors is 2*number of curves. */
inline void OHLC(const mglDataA &open, const mglDataA &high, const mglDataA &low, const mglDataA &close, const char *pen="", const char *opt="")
{ mgl_ohlc(gr, &open,&high,&low,&close,pen,opt); }
/// Draw box-plot (statistical 5-value plot) at positions x.
/** Pen may contain '<', '^', '>' to align boxes at left/center/right. */
inline void BoxPlot(const mglDataA &x, const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_boxplot_xy(gr, &x, &y, pen,opt); }
/// Draw box-plot (statistical 5-value plot) with x taken in the x-axis range.
/** Pen may contain '<', '^', '>' to align boxes at left/center/right. */
inline void BoxPlot(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_boxplot(gr, &y, pen,opt); }
/// Draw candle plot at positions x with bodies [v1,v2] and shadows (wicks) [y1,y2].
/** Different colors are used for up and down values if 2 colors are specified.
 * Style '#' forces wired candles even for a 2-color scheme. */
inline void Candle(const mglDataA &x, const mglDataA &v1, const mglDataA &v2, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle_xyv(gr, &x, &v1, &v2, &y1, &y2, pen, opt); }
/// Draw candle plot with x taken in the x-axis range.
/** Different colors are used for up and down values if 2 colors are specified.
 * Style '#' forces wired candles even for a 2-color scheme. */
inline void Candle(const mglDataA &v1, const mglDataA &v2, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle_yv(gr, &v1, &v2, &y1, &y2, pen, opt); }
/// Same as above, but without shadow (wick) ranges: y1 and y2 are passed as NULL.
inline void Candle(const mglDataA &v1, const mglDataA &v2, const char *pen="", const char *opt="")
{ mgl_candle_yv(gr, &v1, &v2, NULL, NULL, pen, opt); }
/// Draw candle plot for data y with v1=v[i], v2=v[i+1], and shadow ranges [y1,y2].
/** Different colors are used for up and down values if 2 colors are specified.
 * Style '#' forces wired candles even for a 2-color scheme. */
inline void Candle(const mglDataA &y, const mglDataA &y1, const mglDataA &y2, const char *pen="", const char *opt="")
{ mgl_candle(gr, &y, &y1, &y2, pen, opt); }
/// Draw candle plot for data y with v1=v[i], v2=v[i+1] and no shadow ranges (NULL).
/** Different colors are used for up and down values if 2 colors are specified.
 * Style '#' forces wired candles even for a 2-color scheme. */
inline void Candle(const mglDataA &y, const char *pen="", const char *opt="")
{ mgl_candle(gr, &y, NULL, NULL, pen, opt); }
/// Draw cones from points {x,y,z} to the axis plane.
/** Pen flags: '@' draw edges, '#' wired cones, 't' tubes/cylinders instead of cones/prisms,
 * '4'/'6'/'8' square/hex-/octo-prism instead of cones,
 * '<'/'^'/'>' align cones left/center/right at their x-coordinates.
 * Gradient filling is used if the number of colors is 2*number of curves. */
inline void Cones(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones_xyz(gr, &x, &y, &z, pen, opt); }
/// Draw cones from points {x,z} to the axis plane.
/** Pen flags: '@' draw edges, '#' wired cones, 't' tubes/cylinders instead of cones/prisms,
 * '4'/'6'/'8' square/hex-/octo-prism instead of cones,
 * '<'/'^'/'>' align cones left/center/right at their x-coordinates.
 * Gradient filling is used if the number of colors is 2*number of curves. */
inline void Cones(const mglDataA &x, const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones_xz(gr, &x, &z, pen, opt); }
/// Draw cones for data z, with x taken in the x-axis range, to the axis plane.
/** Pen flags: '@' draw edges, '#' wired cones, 't' tubes/cylinders instead of cones/prisms,
 * '4'/'6'/'8' square/hex-/octo-prism instead of cones,
 * '<'/'^'/'>' align cones left/center/right at their x-coordinates.
 * Gradient filling is used if the number of colors is 2*number of curves. */
inline void Cones(const mglDataA &z, const char *pen="@", const char *opt="")
{ mgl_cones(gr, &z, pen, opt); }
/// Draw error boxes {ey} at points {x,y} with x taken in the x-axis range.
/** Style '@' draws a large semitransparent mark instead of an error box. */
inline void Error(const mglDataA &y, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error(gr, &y, &ey, pen, opt); }
/// Draw error boxes {ey} at points {x,y}.
/** Style '@' draws a large semitransparent mark instead of an error box. */
inline void Error(const mglDataA &x, const mglDataA &y, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error_xy(gr, &x, &y, &ey, pen, opt); }
/// Draw error boxes {ex,ey} (both directions) at points {x,y}.
/** Style '@' draws a large semitransparent mark instead of an error box. */
inline void Error(const mglDataA &x, const mglDataA &y, const mglDataA &ex, const mglDataA &ey, const char *pen="", const char *opt="")
{ mgl_error_exy(gr, &x, &y, &ex, &ey, pen, opt); }
/// Draw marks of size r at points {x,y,z}. Note: \a pen (mark style) is mandatory here.
inline void Mark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_xyz(gr, &x, &y, &z, &r, pen, opt); }
/// Draw marks of size r at points {x,y}. Note: \a pen (mark style) is mandatory here.
inline void Mark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_xy(gr, &x, &y, &r, pen, opt); }
/// Draw marks of size r at points {x,y} with x taken in the x-axis range. \a pen is mandatory.
inline void Mark(const mglDataA &y, const mglDataA &r, const char *pen, const char *opt="")
{ mgl_mark_y(gr, &y, &r, pen, opt); }
/// Draw Poincare map (points where s==0) for the curve {x,y,z}. \a pen is mandatory.
inline void Pmap(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap_xyz(gr, &x, &y, &z, &s, pen, opt); }
/// Draw Poincare map (points where s==0) for the curve {x,y}. \a pen is mandatory.
inline void Pmap(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap_xy(gr, &x, &y, &s, pen, opt); }
/// Draw Poincare map (points where s==0) for curve y with x taken in the x-axis range. \a pen is mandatory.
inline void Pmap(const mglDataA &y, const mglDataA &s, const char *pen, const char *opt="")
{ mgl_pmap(gr, &y, &s, pen, opt); }
/// Draw textual marks \a text of size r at points {x,y,z}.
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_xyzr(gr, &x, &y, &z, &r, text, fnt, opt); }
/// Draw textual marks \a text of size r at points {x,y}.
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_xyr(gr, &x, &y, &r, text, fnt, opt); }
/// Draw textual marks \a text of size r at points {x,y} with x taken in the x-axis range.
inline void TextMark(const mglDataA &y, const mglDataA &r, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark_yr(gr, &y, &r, text, fnt, opt); }
/// Draw textual marks \a text (default size) at points {x,y} with x taken in the x-axis range.
inline void TextMark(const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_textmark(gr, &y, text, fnt, opt); }
/// Wide-character overload: draw textual marks of size r at points {x,y,z}.
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_xyzr(gr, &x, &y, &z, &r, text, fnt, opt); }
/// Wide-character overload: draw textual marks of size r at points {x,y}.
inline void TextMark(const mglDataA &x, const mglDataA &y, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_xyr(gr, &x, &y, &r, text, fnt, opt); }
/// Wide-character overload: draw textual marks of size r with x taken in the x-axis range.
inline void TextMark(const mglDataA &y, const mglDataA &r, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw_yr(gr, &y, &r, text, fnt, opt); }
/// Wide-character overload: draw textual marks (default size) with x taken in the x-axis range.
inline void TextMark(const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_textmarkw(gr, &y, text, fnt, opt); }
/// Draw labels for point coordinate(s) at points {x,y,z}.
/** Fnt flags: 'f' fixed number format, 'E' use 'E' instead of 'e', 'F' LaTeX format,
 * '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision. */
inline void Label(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_xyz(gr, &x, &y, &z, text, fnt, opt); }
/// Draw labels for point coordinate(s) at points {x,y}.
/** Fnt flags: 'f' fixed number format, 'E' use 'E' instead of 'e', 'F' LaTeX format,
 * '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision. */
inline void Label(const mglDataA &x, const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_xy(gr, &x, &y, text, fnt, opt); }
/// Draw labels for point coordinate(s) with x taken in the x-axis range.
/** Fnt flags: 'f' fixed number format, 'E' use 'E' instead of 'e', 'F' LaTeX format,
 * '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision. */
inline void Label(const mglDataA &y, const char *text, const char *fnt="", const char *opt="")
{ mgl_label_y(gr, &y, text, fnt, opt); }
/// Wide-character overload: draw labels for point coordinate(s) at points {x,y,z}.
/** Fnt flags: 'f' fixed number format, 'E' use 'E' instead of 'e', 'F' LaTeX format,
 * '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision. */
inline void Label(const mglDataA &x, const mglDataA &y, const mglDataA &z, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_xyz(gr, &x, &y, &z, text, fnt, opt); }
/// Wide-character overload: draw labels for point coordinate(s) at points {x,y}.
/** Fnt flags: 'f' fixed number format, 'E' use 'E' instead of 'e', 'F' LaTeX format,
 * '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision. */
inline void Label(const mglDataA &x, const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_xy(gr, &x, &y, text, fnt, opt); }
/// Wide-character overload: draw labels for point coordinate(s) with x taken in the x-axis range.
/** Fnt flags: 'f' fixed number format, 'E' use 'E' instead of 'e', 'F' LaTeX format,
 * '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision. */
inline void Label(const mglDataA &y, const wchar_t *text, const char *fnt="", const char *opt="")
{ mgl_labelw_y(gr, &y, text, fnt, opt); }
/// Draw table of values \a val with row labels \a text (at default position 0,0).
/** Fnt flags: '#' draw cell borders, '|' limit table width by the subplot (same as option 'value 1'),
 * '=' equal width for all cells, 'f' fixed number format, 'E' use 'E' instead of 'e',
 * 'F' LaTeX format, '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision.
 * Option "value" sets the width of the table (default is 1). */
inline void Table(const mglDataA &val, const char *text, const char *fnt="#|", const char *opt="")
{ mgl_table(gr, 0, 0, &val, text, fnt, opt); }
/// Wide-character overload: draw table of values \a val with row labels \a text (at position 0,0).
/** Fnt flags: '#' draw cell borders, '|' limit table width by the subplot (same as option 'value 1'),
 * '=' equal width for all cells, 'f' fixed number format, 'E' use 'E' instead of 'e',
 * 'F' LaTeX format, '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision.
 * Option "value" sets the width of the table (default is 1). */
inline void Table(const mglDataA &val, const wchar_t *text, const char *fnt="#|", const char *opt="")
{ mgl_tablew(gr, 0, 0, &val, text, fnt, opt); }
/// Draw table of values \a val with row labels \a text at the given position (x,y).
/** Fnt flags: '#' draw cell borders, '|' limit table width by the subplot (same as option 'value 1'),
 * '=' equal width for all cells, 'f' fixed number format, 'E' use 'E' instead of 'e',
 * 'F' LaTeX format, '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision.
 * Option "value" sets the width of the table (default is 1). */
inline void Table(double x, double y, const mglDataA &val, const char *text, const char *fnt="#|", const char *opt="")
{ mgl_table(gr, x, y, &val, text, fnt, opt); }
/// Wide-character overload: draw table of values \a val with row labels at position (x,y).
/** Fnt flags: '#' draw cell borders, '|' limit table width by the subplot (same as option 'value 1'),
 * '=' equal width for all cells, 'f' fixed number format, 'E' use 'E' instead of 'e',
 * 'F' LaTeX format, '+' print '+' for positive numbers, '-' print usual '-',
 * digits '0'-'9' set the printing precision.
 * Option "value" sets the width of the table (default is 1). */
inline void Table(double x, double y, const mglDataA &val, const wchar_t *text, const char *fnt="#|", const char *opt="")
{ mgl_tablew(gr, x, y, &val, text, fnt, opt); }
/// Draw tube with variable radius r (data array) around the curve {x,y,z}.
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_xyzr(gr, &x, &y, &z, &r, pen, opt); }
/// Draw tube with constant radius r around the curve {x,y,z}.
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &z, double r, const char *pen="", const char *opt="")
{ mgl_tube_xyz(gr, &x, &y, &z, r, pen, opt); }
/// Draw tube with variable radius r (data array) around the curve {x,y}.
inline void Tube(const mglDataA &x, const mglDataA &y, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_xyr(gr, &x, &y, &r, pen, opt); }
/// Draw tube with constant radius r around the curve {x,y}.
inline void Tube(const mglDataA &x, const mglDataA &y, double r, const char *pen="", const char *opt="")
{ mgl_tube_xy(gr, &x, &y, r, pen, opt); }
/// Draw tube with variable radius r around curve y, with x taken in the x-axis range.
inline void Tube(const mglDataA &y, const mglDataA &r, const char *pen="", const char *opt="")
{ mgl_tube_r(gr, &y, &r, pen, opt); }
/// Draw tube with constant radius r around curve y, with x taken in the x-axis range.
inline void Tube(const mglDataA &y, double r, const char *pen="", const char *opt="")
{ mgl_tube(gr, &y, r, pen, opt); }
/// Draw the surface of revolution of the curve {r,z} around the axis.
/** Style '#' produces a wire plot; style '.' plots by dots. */
inline void Torus(const mglDataA &r, const mglDataA &z, const char *pen="", const char *opt="")
{ mgl_torus(gr, &r, &z, pen,opt); }
/// Draw mesh lines for 2D data z specified parametrically over grid {x,y}.
inline void Mesh(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_mesh_xy(gr, &x, &y, &z, stl, opt); }
/// Draw mesh lines for 2D data z (coordinates taken from the axis ranges).
inline void Mesh(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_mesh(gr, &z, stl, opt); }
/// Draw waterfall plot for 2D data z specified parametrically over grid {x,y}.
/** Style 'x' draws lines in the x-direction. */
inline void Fall(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_fall_xy(gr, &x, &y, &z, stl, opt); }
/// Draw waterfall plot for 2D data z (coordinates taken from the axis ranges).
/** Style 'x' draws lines in the x-direction. */
inline void Fall(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_fall(gr, &z, stl, opt); }
/// Draw belts for 2D data z specified parametrically over grid {x,y}.
/** Style 'x' draws belts in the x-direction. */
inline void Belt(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_belt_xy(gr, &x, &y, &z, stl, opt); }
/// Draw belts for 2D data z (coordinates taken from the axis ranges).
/** Style 'x' draws belts in the x-direction. */
inline void Belt(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_belt(gr, &z, stl, opt); }
/// Draw belts for parametric 2D data z with color proportional to c.
/** Style 'x' draws belts in the x-direction. */
inline void BeltC(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_beltc_xy(gr, &x, &y, &z, &c, stl, opt); }
/// Draw belts for 2D data z with color proportional to c.
/** Style 'x' draws belts in the x-direction. */
inline void BeltC(const mglDataA &z, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_beltc(gr, &z, &c, stl, opt); }
/// Draw surface for parametric 2D data z with color proportional to z.
/** Style '#' draws grid lines; style '.' plots by dots. */
inline void Surf(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_surf_xy(gr, &x, &y, &z, stl, opt); }
/// Draw surface for 2D data z with color proportional to z.
/** Style '#' draws grid lines; style '.' plots by dots. */
inline void Surf(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_surf(gr, &z, stl, opt); }
/// Draw grid lines for density plot of parametric 2D data z.
inline void Grid(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_grid_xy(gr, &x, &y, &z, stl, opt); }
/// Draw grid lines for density plot of 2D data z.
inline void Grid(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_grid(gr, &z, stl, opt); }
/// Draw vertical tiles for parametric 2D data z with manual colors c.
inline void Tile(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_tile_xyc(gr, &x, &y, &z, &c, stl, opt); }
/// Draw vertical tiles for parametric 2D data z.
inline void Tile(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_tile_xy(gr, &x, &y, &z, stl, opt); }
/// Draw vertical tiles for 2D data z.
inline void Tile(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_tile(gr, &z, stl, opt); }
/// Draw density plot for parametric 2D data c.
/** Style '#' draws grid lines; style '.' plots by dots. */
inline void Dens(const mglDataA &x, const mglDataA &y, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_dens_xy(gr, &x, &y, &c, stl, opt); }
/// Draw density plot for 2D data c.
/** Style '#' draws grid lines; style '.' plots by dots. */
inline void Dens(const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_dens(gr, &c, stl, opt); }
/// Draw vertical boxes for parametric 2D data z.
/** Style '#' draws filled boxes. */
inline void Boxs(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_boxs_xy(gr, &x, &y, &z, stl, opt); }
/// Draw vertical boxes for 2D data z.
/** Style '#' draws filled boxes. */
inline void Boxs(const mglDataA &z, const char *stl="", const char *opt="")
{ mgl_boxs(gr, &z, stl, opt); }
/// Draw contour lines of data a at manual levels v on the parametric surface {x,y,z}.
/** Style 'f' draws solid contours; 't'/'T' draw contour labels below/above contours. */
inline void ContP(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_contp_val(gr, &v, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines of data a on the parametric surface {x,y,z} at automatic levels.
/** Style 'f' draws solid contours; 't'/'T' draw contour labels below/above contours.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContP(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_contp(gr, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines at manual levels v for parametric 2D data z.
/** Style '_' draws contours at the bottom of the axis box;
 * 't'/'T' draw contour labels below/above contours. */
inline void Cont(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw contour lines at manual levels v for 2D data z.
/** Style '_' draws contours at the bottom of the axis box;
 * 't'/'T' draw contour labels below/above contours. */
inline void Cont(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_val(gr, &v, &z, sch, opt); }
/// Draw contour lines at automatic levels for parametric 2D data z.
/** Style '_' draws contours at the bottom of the axis box;
 * 't'/'T' draw contour labels below/above contours.
 * Option "value" sets the number of contour levels (default is 7). */
inline void Cont(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_xy(gr, &x, &y, &z, sch, opt); }
/// Draw contour lines at automatic levels for 2D data z.
/** Style '_' draws contours at the bottom of the axis box;
 * 't'/'T' draw contour labels below/above contours.
 * Option "value" sets the number of contour levels (default is 7). */
inline void Cont(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont(gr, &z, sch, opt); }
/// Draw the single contour line a[i,j]=val on the parametric surface {x,y,z}.
/** Styles 't'/'T' draw contour labels below/above contours. */
inline void ContGen(mreal val, const mglDataA &a, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_cont_gen(gr, val, &a, &x, &y, &z, sch, opt); }
/// Draw solid (filled) contours at manual levels v for parametric 2D data z.
/** Style '_' draws contours at the bottom of the axis box. */
inline void ContF(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw solid (filled) contours at manual levels v for 2D data z.
/** Style '_' draws contours at the bottom of the axis box. */
inline void ContF(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_val(gr, &v, &z, sch, opt); }
/// Draw solid (filled) contours at automatic levels for parametric 2D data z.
/** Style '_' draws contours at the bottom of the axis box.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContF(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_xy(gr, &x, &y, &z, sch, opt); }
/// Draw solid (filled) contours at automatic levels for 2D data z.
/** Style '_' draws contours at the bottom of the axis box.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContF(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf(gr, &z, sch, opt); }
/// Draw the solid contour band between a[i,j]=v1 and a[i,j]=v2 on the parametric surface {x,y,z}.
inline void ContFGen(mreal v1, mreal v2, const mglDataA &a, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contf_gen(gr, v1,v2, &a, &x, &y, &z, sch, opt); }
/// Draw solid contours at manual levels v for parametric 2D data z, with individually specified colors.
/** Style '_' draws contours at the bottom of the axis box. */
inline void ContD(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw solid contours at manual levels v for 2D data z, with individually specified colors.
/** Style '_' draws contours at the bottom of the axis box. */
inline void ContD(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_val(gr, &v, &z, sch, opt); }
/// Draw solid contours at automatic levels for parametric 2D data z, with specified colors.
/** Style '_' draws contours at the bottom of the axis box.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContD(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd_xy(gr, &x, &y, &z, sch, opt); }
/// Draw solid contours at automatic levels for 2D data z, with specified colors.
/** Style '_' draws contours at the bottom of the axis box.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContD(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contd(gr, &z, sch, opt); }
/// Draw contour tubes between manual levels v for parametric 2D data z.
/** Style '_' draws contours at the bottom of the axis box. */
inline void ContV(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_xy_val(gr, &v, &x, &y, &z, sch, opt); }
/// Draw contour tubes between manual levels v for 2D data z.
/** Style '_' draws contours at the bottom of the axis box. */
inline void ContV(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_val(gr, &v, &z, sch, opt); }
/// Draw contour tubes at automatic levels for parametric 2D data z.
/** Style '_' draws contours at the bottom of the axis box.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContV(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv_xy(gr, &x, &y, &z, sch, opt); }
/// Draw contour tubes at automatic levels for 2D data z.
/** Style '_' draws contours at the bottom of the axis box.
 * Option "value" sets the number of contour levels (default is 7). */
inline void ContV(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_contv(gr, &z, sch, opt); }
/// Draw axially symmetric isosurfaces at manual levels v for parametric 2D data z.
/** Sch flags: '#' wired plot, '.' plot by dots,
 * 'x'/'z' rotate around the x-/z-axis (default is the y-axis). */
inline void Axial(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_xy_val(gr, &v, &x, &y, &z, sch,opt); }
/// Draw axially symmetric isosurfaces at manual levels v for 2D data z.
/** Sch flags: '#' wired plot, '.' plot by dots,
 * 'x'/'z' rotate around the x-/z-axis (default is the y-axis). */
inline void Axial(const mglDataA &v, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_val(gr, &v, &z, sch, opt); }
/// Draw axially symmetric isosurfaces at automatic levels for parametric 2D data z.
/** Sch flags: '#' wired plot, '.' plot by dots,
 * 'x'/'z' rotate around the x-/z-axis (default is the y-axis).
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Axial(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial_xy(gr, &x, &y, &z, sch, opt); }
/// Draw axially symmetric isosurfaces at automatic levels for 2D data z.
/** Sch flags: '#' wired plot, '.' plot by dots,
 * 'x'/'z' rotate around the x-/z-axis (default is the y-axis).
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Axial(const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_axial(gr, &z, sch, opt); }
/// Draw grid lines for a density-plot slice of parametric 3D data a.
/** Styles 'x'/'z' make the slice perpendicular to the x-/z-direction.
 * sVal selects the slice position — presumably the index along the chosen direction; confirm in MathGL docs. */
inline void Grid3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_grid3_xyz(gr, &x, &y, &z, &a, stl, sVal, opt); }
/// Draw grid lines for a density-plot slice of 3D data a.
/** Styles 'x'/'z' make the slice perpendicular to the x-/z-direction.
 * sVal selects the slice position — presumably the index along the chosen direction; confirm in MathGL docs. */
inline void Grid3(const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_grid3(gr, &a, stl, sVal, opt); }
/// Draw density plot at a slice of parametric 3D data a.
/** Style '#' draws grid lines; 'x'/'z' make the slice perpendicular to the x-/z-direction.
 * sVal selects the slice position — presumably the index along the chosen direction; confirm in MathGL docs. */
inline void Dens3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_dens3_xyz(gr, &x, &y, &z, &a, stl, sVal, opt); }
/// Draw density plot at a slice of 3D data a.
/** Style '#' draws grid lines; 'x'/'z' make the slice perpendicular to the x-/z-direction.
 * sVal selects the slice position — presumably the index along the chosen direction; confirm in MathGL docs. */
inline void Dens3(const mglDataA &a, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_dens3(gr, &a, stl, sVal, opt); }
/// Draw the isosurface a=Val for parametric 3D data a.
/** Style '#' draws a wired plot; style '.' plots by dots. */
inline void Surf3(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_xyz_val(gr, Val, &x, &y, &z, &a, stl, opt); }
/// Draw the isosurface a=Val for 3D data a.
/** Style '#' draws a wired plot; style '.' plots by dots. */
inline void Surf3(double Val, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_val(gr, Val, &a, stl, opt); }
/// Draw isosurfaces at automatic levels for parametric 3D data a.
/** Style '#' draws a wired plot; style '.' plots by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3_xyz(gr, &x, &y, &z, &a, stl, opt); }
/// Draw isosurfaces at automatic levels for 3D data a.
/** Style '#' draws a wired plot; style '.' plots by dots.
 * Option "value" sets the number of isosurfaces (default is 3). */
inline void Surf3(const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_surf3(gr, &a, stl, opt); }
/// Draw a semi-transparent cloud for parametric 3D data a.
/** Style '.' plots by dots; style 'i' uses inverted values for transparency. */
inline void Cloud(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_cloud_xyz(gr, &x, &y, &z, &a, stl, opt); }
/// Draw a semi-transparent cloud for 3D data a.
/** Style '.' plots by dots; style 'i' uses inverted values for transparency. */
inline void Cloud(const mglDataA &a, const char *stl="", const char *opt="")
{ mgl_cloud(gr, &a, stl, opt); }
/// Draw contour lines at manual levels v along a slice of parametric 3D data a.
/** Style '#' draws grid lines; 'x'/'z' make the slice perpendicular to the x-/z-direction;
 * 't'/'T' draw contour labels below/above contours.
 * sVal selects the slice position — presumably the index along the chosen direction; confirm in MathGL docs. */
inline void Cont3(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_xyz_val(gr, &v, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw contour lines at manual levels along slice for 3d data
/** Style ‘#’ draw grid lines.
* Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
* Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void Cont3(const mglDataA &v, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_val(gr, &v, &a, sch, sVal, opt); }
/// Draw contour lines along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines.
* Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* Option "value" set the number of contour levels (default is 7). */
inline void Cont3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3_xyz(gr, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw contour lines along slice for 3d data
/** Style ‘#’ draw grid lines.
* Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* Option "value" set the number of contour levels (default is 7). */
inline void Cont3(const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_cont3(gr, &a, sch, sVal, opt); }
/// Draw solid contours at manual levels along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly. */
inline void ContF3(const mglDataA &v, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3_xyz_val(gr, &v, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw solid contours at manual levels along slice for 3d data
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly. */
inline void ContF3(const mglDataA &v, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3_val(gr, &v, &a, sch, sVal, opt); }
/// Draw solid contours along slice for 3d data specified parametrically
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
* Option "value" set the number of contour levels (default is 7).*/
inline void ContF3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3_xyz(gr, &x, &y, &z, &a, sch, sVal, opt); }
/// Draw solid contours along slice for 3d data
/** Style ‘#’ draw grid lines. Style ‘x’ or ‘z’ produce plot perpendicular to x- or z-direction correspondingly.
* Option "value" set the number of contour levels (default is 7).*/
inline void ContF3(const mglDataA &a, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_contf3(gr, &a, sch, sVal, opt); }
/// Draw several isosurfaces for 3d beam in curvilinear coordinates
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.
 * \a num set the number of isosurfaces to draw (default is 3).
 * Variable \a flag is bitwise:
 * ‘0x1’ - draw in accompanied (not laboratory) coordinates;
 * ‘0x2’ - draw projection to \rho-z plane;
 * ‘0x4’ - draw normalized in each slice field.*/
inline void Beam(const mglDataA &tr, const mglDataA &g1, const mglDataA &g2, const mglDataA &a, double r, const char *stl=NULL, int flag=0, int num=3)
{ mgl_beam(gr, &tr,&g1,&g2,&a,r,stl,flag,num); }
/// Draw isosurface at value \a val for 3d beam in curvilinear coordinates
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.
 * Variable \a flag is bitwise:
 * ‘0x1’ - draw in accompanied (not laboratory) coordinates;
 * ‘0x2’ - draw projection to \rho-z plane;
 * ‘0x4’ - draw normalized in each slice field.*/
inline void Beam(double val, const mglDataA &tr, const mglDataA &g1, const mglDataA &g2, const mglDataA &a, double r, const char *stl=NULL, int flag=0)
{ mgl_beam_val(gr,val,&tr,&g1,&g2,&a,r,stl,flag); }
/// Draw vertical tiles with variable size r and manual colors c for 2d data specified parametrically
inline void TileS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_tiles_xyc(gr, &x, &y, &z, &r, &c, stl, opt); }
/// Draw vertical tiles with variable size r for 2d data specified parametrically
inline void TileS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &r, const char *stl="", const char *opt="")
{ mgl_tiles_xy(gr, &x, &y, &z, &r, stl, opt); }
/// Draw vertical tiles with variable size r for 2d data
inline void TileS(const mglDataA &z, const mglDataA &r, const char *stl="", const char *opt="")
{ mgl_tiles(gr, &z, &r, stl, opt); }
/// Draw surface for 2d data specified parametrically with color proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfC(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfc_xy(gr, &x, &y, &z, &c, sch,opt); }
/// Draw surface for 2d data with color proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfC(const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfc(gr, &z, &c, sch,opt); }
/// Draw surface for 2d data specified parametrically with alpha proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfa_xy(gr, &x, &y, &z, &c, sch,opt); }
/// Draw surface for 2d data with alpha proportional to c
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfA(const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_surfa(gr, &z, &c, sch,opt); }
/// Draw surface for 2d data specified parametrically with color proportional to c and alpha proportional to a
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfCA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_surfca_xy(gr, &x, &y, &z, &c, &a, sch,opt); }
/// Draw surface for 2d data with color proportional to c and alpha proportional to a
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void SurfCA(const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_surfca(gr, &z, &c, &a, sch,opt); }
/// Color map of matrix a to matrix b, both matrix can parametrically depend on coordinates
/** Style ‘.’ produce plot by dots. */
inline void Map(const mglDataA &x, const mglDataA &y, const mglDataA &a, const mglDataA &b, const char *sch="", const char *opt="")
{ mgl_map_xy(gr, &x, &y, &a, &b, sch, opt); }
/// Color map of matrix a to matrix b
/** Style ‘.’ produce plot by dots. */
inline void Map(const mglDataA &a, const mglDataA &b, const char *sch="", const char *opt="")
{ mgl_map(gr, &a, &b, sch, opt); }
/// Draw density plot for spectrogram specified parametrically
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void STFA(const mglDataA &x, const mglDataA &y, const mglDataA &re, const mglDataA &im, int dn, const char *sch="", const char *opt="")
{ mgl_stfa_xy(gr, &x, &y, &re, &im, dn, sch, opt); }
/// Draw density plot for spectrogram
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void STFA(const mglDataA &re, const mglDataA &im, int dn, const char *sch="", const char *opt="")
{ mgl_stfa(gr, &re, &im, dn, sch, opt); }
/// Draw isosurface at value \a Val for 3d data specified parametrically with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3A(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a_xyz_val(gr, Val, &x, &y, &z, &a, &b, stl, opt); }
/// Draw isosurface at value \a Val for 3d data with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3A(double Val, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a_val(gr, Val, &a, &b, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3A(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a_xyz(gr, &x, &y, &z, &a, &b, stl, opt); }
/// Draw isosurfaces for 3d data with alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3A(const mglDataA &a, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3a(gr, &a, &b, stl, opt); }
/// Draw isosurface at value \a Val for 3d data specified parametrically with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3C(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c_xyz_val(gr, Val, &x, &y, &z, &a, &c, stl,opt); }
/// Draw isosurface at value \a Val for 3d data with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3C(double Val, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c_val(gr, Val, &a, &c, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3C(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c_xyz(gr, &x, &y, &z, &a, &c, stl, opt); }
/// Draw isosurfaces for 3d data with color proportional to c
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3C(const mglDataA &a, const mglDataA &c, const char *stl="", const char *opt="")
{ mgl_surf3c(gr, &a, &c, stl, opt); }
/// Draw isosurface at value \a Val for 3d data specified parametrically with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3CA(double Val, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca_xyz_val(gr, Val, &x, &y, &z, &a, &c, &b, stl,opt); }
/// Draw isosurface at value \a Val for 3d data with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots. */
inline void Surf3CA(double Val, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca_val(gr, Val, &a, &c, &b, stl, opt); }
/// Draw isosurfaces for 3d data specified parametrically with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3CA(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca_xyz(gr, &x, &y, &z, &a, &c, &b, stl, opt); }
/// Draw isosurfaces for 3d data with color proportional to c and alpha proportional to b
/** Style ‘#’ draw wired plot. Style ‘.’ produce plot by dots.
 * Option "value" set the number of isosurfaces (default is 3). */
inline void Surf3CA(const mglDataA &a, const mglDataA &c, const mglDataA &b, const char *stl="", const char *opt="")
{ mgl_surf3ca(gr, &a, &c, &b, stl, opt); }
/// Plot dew drops for vector field {ax,ay} parametrically depended on coordinate {x,y}
/** NOTE(review): drop size presumably scales with |a| — confirm against the MathGL manual. */
inline void Dew(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_dew_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot dew drops for vector field {ax,ay}
inline void Dew(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_dew_2d(gr, &ax, &ay, sch, opt); }
/// Plot vectors at position {x,y} along {ax,ay} with length/color proportional to |a|
/** Option value set the vector length factor (if non-zero) or vector length to be proportional the distance between curve points (if value=0). */
inline void Traj(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_traj_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot vectors at position {x,y,z} along {ax,ay,az} with length/color proportional to |a|
/** Option value set the vector length factor (if non-zero) or vector length to be proportional the distance between curve points (if value=0). */
inline void Traj(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_traj_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot vector field {ax,ay} parametrically depended on coordinate {x,y} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_vect_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot vector field {ax,ay} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_vect_2d(gr, &ax, &ay, sch, opt); }
/// Plot vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_vect_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot vector field {ax,ay,az} with length/color proportional to |a|
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows. */
inline void Vect(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_vect_3d(gr, &ax, &ay, &az, sch, opt); }
/// Draw vector plot along slice for 3d data specified parametrically
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows,
 * ‘x’, ‘z’ for producing plot perpendicular to x- or z-direction correspondingly. */
inline void Vect3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_vect3_xyz(gr, &x, &y, &z, &ax,&ay,&az, stl, sVal, opt); }
/// Draw vector plot along slice for 3d data
/** String \a sch may contain:
 * ‘f’ for drawing arrows with fixed lengths,
 * ‘>’, ‘<’ for drawing arrows to or from the cell point (default is centering),
 * ‘.’ for drawing hachures with dots instead of arrows,
 * ‘=’ for enabling color gradient along arrows,
 * ‘x’, ‘z’ for producing plot perpendicular to x- or z-direction correspondingly. */
inline void Vect3(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *stl="", double sVal=-1, const char *opt="")
{ mgl_vect3(gr, &ax,&ay,&az, stl, sVal, opt); }
/// Plot flows for vector field {ax,ay} parametrically depended on coordinate {x,y} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flow_xy(gr, &x, &y, &ax, &ay, sch, opt); }
/// Plot flows for vector field {ax,ay} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flow_2d(gr, &ax, &ay, sch, opt); }
/// Plot flows for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flow_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot flows for vector field {ax,ay,az} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flow_3d(gr, &ax, &ay, &az, sch, opt); }
/// Plot flow from point p for vector field {ax,ay} parametrically depended on coordinate {x,y} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads. */
inline void FlowP(mglPoint p, const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flowp_xy(gr, p.x, p.y, p.z, &x, &y, &ax, &ay, sch, opt); }
/// Plot flow from point p for vector field {ax,ay} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads. */
inline void FlowP(mglPoint p, const mglDataA &ax, const mglDataA &ay, const char *sch="", const char *opt="")
{ mgl_flowp_2d(gr, p.x, p.y, p.z, &ax, &ay, sch, opt); }
/// Plot flow from point p for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly. */
inline void FlowP(mglPoint p, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flowp_xyz(gr, p.x, p.y, p.z, &x, &y, &z, &ax, &ay, &az, sch, opt); }
/// Plot flow from point p for vector field {ax,ay,az} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly. */
inline void FlowP(mglPoint p, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", const char *opt="")
{ mgl_flowp_3d(gr, p.x, p.y, p.z, &ax, &ay, &az, sch, opt); }
/// Plot flows from given plane for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * 'v' for drawing arrows on the threads;
 * 't' for drawing tapes of normals in x-y and y-z planes.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow3(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_flow3_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, sVal, opt); }
/// Plot flows from given plane for vector field {ax,ay,az} with color proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * 'v' for drawing arrows on the threads;
 * 't' for drawing tapes of normals in x-y and y-z planes.
 * Option "value" sets the number of threads (default is 5). */
inline void Flow3(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double sVal=-1, const char *opt="")
{ mgl_flow3(gr, &ax, &ay, &az, sch, sVal, opt); }
/// Plot flows for gradient of scalar field phi parametrically depended on coordinate {x,y,z}
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5).
 * NOTE(review): this style list mirrors the Flow() documentation — confirm that ‘x’/‘z’ apply to Grad as well. */
inline void Grad(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &phi, const char *sch="", const char *opt="")
{ mgl_grad_xyz(gr,&x,&y,&z,&phi,sch,opt); }
/// Plot flows for gradient of scalar field phi parametrically depended on coordinate {x,y}
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &x, const mglDataA &y, const mglDataA &phi, const char *sch="", const char *opt="")
{ mgl_grad_xy(gr,&x,&y,&phi,sch,opt); }
/// Plot flows for gradient of scalar field phi
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘v’ for drawing arrows on the threads;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Grad(const mglDataA &phi, const char *sch="", const char *opt="")
{ mgl_grad(gr,&phi,sch,opt); }
/// Plot flow pipes for vector field {ax,ay} parametrically depended on coordinate {x,y} with color and radius proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &x, const mglDataA &y, const mglDataA &ax, const mglDataA &ay, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_xy(gr, &x, &y, &ax, &ay, sch, r0, opt); }
/// Plot flow pipes for vector field {ax,ay} with color and radius proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &ax, const mglDataA &ay, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_2d(gr, &ax, &ay, sch, r0, opt); }
/// Plot flow pipes for vector field {ax,ay,az} parametrically depended on coordinate {x,y,z} with color and radius proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_xyz(gr, &x, &y, &z, &ax, &ay, &az, sch, r0, opt); }
/// Plot flow pipes for vector field {ax,ay,az} with color and radius proportional to |a|
/** String \a sch may contain:
 * color scheme: up-half (warm) corresponds to normal flow (like attractor), bottom-half (cold) corresponds to inverse flow (like source);
 * ‘#’ for starting threads from edges only;
 * ‘i’ for pipe radius to be inverse proportional to amplitude;
 * ‘x’, ‘z’ for drawing tapes of normals in x-y and y-z planes correspondingly.
 * Option "value" sets the number of threads (default is 5). */
inline void Pipe(const mglDataA &ax, const mglDataA &ay, const mglDataA &az, const char *sch="", double r0=0.05, const char *opt="")
{ mgl_pipe_3d(gr, &ax, &ay, &az, sch, r0, opt); }
/// Draw density plot for data at x = sVal
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.
 * NOTE(review): sVal=mglNaN presumably selects a default slice position — confirm against the MathGL manual. */
inline void DensX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_dens_x(gr, &a, stl, sVal, opt); }
/// Draw density plot for data at y = sVal
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void DensY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_dens_y(gr, &a, stl, sVal, opt); }
/// Draw density plot for data at z = sVal
/** Style ‘#’ draw grid lines. Style ‘.’ produce plot by dots.*/
inline void DensZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_dens_z(gr, &a, stl, sVal, opt); }
/// Draw contour lines for data at x = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_x(gr, &a, stl, sVal, opt); }
/// Draw contour lines at manual levels \a v for data at x = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void ContX(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_x_val(gr, &v, &a, stl, sVal, opt); }
/// Draw contour lines for data at y = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_y(gr, &a, stl, sVal, opt); }
/// Draw contour lines at manual levels \a v for data at y = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void ContY(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_y_val(gr, &v, &a, stl, sVal, opt); }
/// Draw contour lines for data at z = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours.
 * Option "value" set the number of contour levels (default is 7). */
inline void ContZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_z(gr, &a, stl, sVal, opt); }
/// Draw contour lines at manual levels \a v for data at z = sVal
/** Style ‘t’/‘T’ draw contour labels below/above contours. */
inline void ContZ(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_cont_z_val(gr, &v, &a, stl, sVal, opt); }
/// Draw solid contours for data at x = sVal
/** Option "value" set the number of contour levels (default is 7). */
inline void ContFX(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_x(gr, &a, stl, sVal, opt); }
/// Draw solid contours at manual levels \a v for data at x = sVal
inline void ContFX(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_x_val(gr, &v, &a, stl, sVal, opt); }
/// Draw solid contours for data at y = sVal
/** Option "value" set the number of contour levels (default is 7). */
inline void ContFY(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_y(gr, &a, stl, sVal, opt); }
/// Draw solid contours at manual levels \a v for data at y = sVal
inline void ContFY(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_y_val(gr, &v, &a, stl, sVal, opt); }
/// Draw solid contours for data at z = sVal
/** Option "value" set the number of contour levels (default is 7). */
inline void ContFZ(const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_z(gr, &a, stl, sVal, opt); }
/// Draw solid contours at manual levels \a v for data at z = sVal
inline void ContFZ(const mglDataA &v, const mglDataA &a, const char *stl="", double sVal=mglNaN, const char *opt="")
{ mgl_contf_z_val(gr, &v, &a, stl, sVal, opt); }
/// Draw curve for formula with x in x-axis range
/** Option "value" set initial number of points. */
inline void FPlot(const char *fy, const char *stl="", const char *opt="")
{ mgl_fplot(gr, fy, stl, opt); }
/// Draw curve for formulas parametrically depended on t in range [0,1]
/** Option "value" set initial number of points. */
inline void FPlot(const char *fx, const char *fy, const char *fz, const char *stl, const char *opt="")
{ mgl_fplot_xyz(gr, fx, fy, fz, stl, opt); }
/// Draw surface by formula with x,y in axis range
/** Option "value" set initial number of points. */
inline void FSurf(const char *fz, const char *stl="", const char *opt="")
{ mgl_fsurf(gr, fz, stl, opt); }
/// Draw surface by formulas parametrically depended on u,v in range [0,1]
/** Option "value" set initial number of points. */
inline void FSurf(const char *fx, const char *fy, const char *fz, const char *stl, const char *opt="")
{ mgl_fsurf_xyz(gr, fx, fy, fz, stl, opt); }
/// Draw triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘#’ produce wire plot. If id.ny=c.nx then c set the triangle colors, else vertex colors. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_triplot_xyzc(gr, &nums, &x, &y, &z, &c, sch, opt); }
/// Draw triangle mesh for points in arrays {x,y,z}
/** Style ‘#’ produce wire plot. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_triplot_xyz(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw triangle mesh for points in arrays {x,y}
/** Style ‘#’ produce wire plot. */
inline void TriPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const char *sch="", const char *opt="")
{ mgl_triplot_xy(gr, &nums, &x, &y, sch, opt); }
/// Draw quad mesh for points in arrays {x,y,z} with specified color c
/** Style ‘#’ produce wire plot. If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const char *sch="", const char *opt="")
{ mgl_quadplot_xyzc(gr, &nums, &x, &y, &z, &c, sch, opt); }
/// Draw quad mesh for points in arrays {x,y,z}
/** Style ‘#’ produce wire plot. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_quadplot_xyz(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw quad mesh for points in arrays {x,y}
/** Style ‘#’ produce wire plot. */
inline void QuadPlot(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const char *sch="", const char *opt="")
{ mgl_quadplot_xy(gr, &nums, &x, &y, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z}
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriCont(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricont_xyc(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z}
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors.
* Option "value" set the number of contour levels (default is 7). */
inline void TriContV(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricont_xycv(gr, &v, &nums, &x, &y, &z, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriCont(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzc(gr, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriContV(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour lines for triangle mesh for points in arrays {x,y,z} with specified color c.
/** Style ‘_’ to draw contours at bottom of axis box.
* Style ‘t’/‘T’ draw contour labels below/above contours.
* If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriCont(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricont_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z}
/** Option "value" set the number of contour levels (default is 7). */
inline void TriContVt(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_tricontv_xyc(gr, &nums, &x, &y, &z, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z} with specified color c
/** Option "value" set the number of contour levels (default is 7). */
inline void TriContVt(const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricontv_xyzc(gr, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw contour tubes for triangle mesh for points in arrays {x,y,z} with specified color c
/** If id.ny=c.nx then c set the quadrangle colors, else vertex colors. */
inline void TriContVt(const mglDataA &v, const mglDataA &nums, const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_tricontv_xyzcv(gr, &v, &nums, &x, &y, &z, &a, sch, opt); }
/// Draw dots in points {x,y,z}.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_dots(gr, &x, &y, &z, sch, opt); }
/// Draw semitransparent dots in points {x,y,z} with specified alpha a.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_dots_a(gr, &x, &y, &z, &a, sch, opt); }
/// Draw semitransparent dots in points {x,y,z} with specified color c and alpha a.
inline void Dots(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &c, const mglDataA &a, const char *sch="", const char *opt="")
{ mgl_dots_ca(gr, &x, &y, &z, &c, &a, sch, opt); }
/// Draw surface reconstructed for points in arrays {x,y,z}.
/** Style ‘#’ produce wired plot. */
inline void Crust(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *sch="", const char *opt="")
{ mgl_crust(gr, &x, &y, &z, sch, opt); }
// ---- Nonlinear fitting -----------------------------------------------------
// Each Fit*() fits formula `eq` in variables `vars` (optionally starting from
// `ini`, optionally weighted by dispersion `s`) and returns the data evaluated
// from the fitted formula. The formula, chi and covariance of the LAST fit are
// queried afterwards via GetFit()/GetFitChi()/GetFitCovar().
/// Fit data along x-direction for each data row. Return array with values for found formula.
inline mglData Fit(const mglDataA &y, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_1(gr, &y, eq,vars,0, opt)); }
/// Fit data along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &y, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_1(gr, &y, eq, vars, &ini, opt)); }
/// Fit data along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData Fit2(const mglDataA &z, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_2(gr, &z, eq, vars,0, opt)); }
/// Fit data along x-, y-direction for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData Fit2(const mglDataA &z, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_2(gr, &z, eq, vars, &ini, opt)); }
/// Fit data along all directions. Return array with values for found formula.
inline mglData Fit3(const mglDataA &a, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_3(gr, &a, eq, vars,0, opt)); }
/// Fit data along all directions starting from \a ini values. Return array with values for found formula.
inline mglData Fit3(const mglDataA &a, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_3(gr, &a, eq, vars, &ini, opt)); }
/// Fit data along x-direction for each data row. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xy(gr, &x, &y, eq, vars,0, opt)); }
/// Fit data along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xy(gr, &x, &y, eq, vars, &ini, opt)); }
/// Fit data along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyz(gr, &x, &y, &z, eq, vars,0, opt)); }
/// Fit data along x-, y-directions for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyz(gr, &x, &y, &z, eq, vars, &ini, opt)); }
/// Fit data along all directions. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyza(gr, &x, &y, &z, &a, eq, vars,0, opt)); }
/// Fit data along all directions starting from \a ini values. Return array with values for found formula.
inline mglData Fit(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyza(gr, &x, &y, &z, &a, eq,vars, &ini, opt)); }
/// Fit data with dispersion s along x-direction for each data row. Return array with values for found formula.
inline mglData FitS(const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_ys(gr, &y, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_ys(gr, &y, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along x-direction for each data row. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xys(gr, &x, &y, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-direction for each data row starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xys(gr, &x, &y, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along x-, y-directions for each data slice. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyzs(gr, &x, &y, &z, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along x-, y-directions for each data slice starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyzs(gr, &x, &y, &z, &s, eq, vars, &ini, opt)); }
/// Fit data with dispersion s along all directions. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &s, const char *eq, const char *vars, const char *opt="")
{ return mglData(true,mgl_fit_xyzas(gr, &x, &y, &z, &a, &s, eq, vars,0, opt)); }
/// Fit data with dispersion s along all directions starting from \a ini values. Return array with values for found formula.
inline mglData FitS(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const mglDataA &s, const char *eq, const char *vars, mglData &ini, const char *opt="")
{ return mglData(true,mgl_fit_xyzas(gr, &x, &y, &z, &a, &s, eq, vars, &ini, opt)); }
/// Print fitted last formula (with coefficients) at position p
inline void PutsFit(mglPoint p, const char *prefix=0, const char *font="", double size=-1)
{ mgl_puts_fit(gr, p.x, p.y, p.z, prefix, font, size); }
/// Get last fitted formula
inline const char *GetFit() const
{ return mgl_get_fit(gr); }
/// Get chi for last fitted formula
/// (static: refers to the last fit process-wide, not to this mglGraph instance)
static inline mreal GetFitChi()
{ return mgl_get_fit_chi(); }
/// Get covariance matrix for last fitted formula
/// (static: refers to the last fit process-wide, not to this mglGraph instance)
static inline mglData GetFitCovar()
{ return mglData(mgl_get_fit_covar()); }
/// Solve PDE with x,y,z in axis range; dz is the step along z, k0 the wave number
inline mglData PDE(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglData(true,mgl_pde_solve(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in axis range; complex-valued result
inline mglDataC PDEc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglDataC(true,mgl_pde_solve_c(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in axis range using advanced (slow!!!) method (2d only)
inline mglData APDE(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglData(true,mgl_pde_adv(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Solve PDE with x,y,z in axis range using advanced (slow!!!) method (2d only); complex-valued result
inline mglDataC APDEc(const char *ham, const mglDataA &ini_re, const mglDataA &ini_im, double dz=0.1, double k0=100, const char *opt="")
{ return mglDataC(true,mgl_pde_adv_c(gr,ham,&ini_re,&ini_im,dz,k0, opt)); }
/// Fill data by formula with x,y,z in axis range
inline void Fill(mglData &u, const char *eq, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, 0, 0, opt); }
/// Same, with one extra data array available to the formula
/// (NOTE(review): presumably exposed to eq as variable ‘v’ — confirm against MathGL docs)
inline void Fill(mglData &u, const char *eq, const mglDataA &v, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, &v, 0, opt); }
/// Same, with two extra data arrays available to the formula
inline void Fill(mglData &u, const char *eq, const mglDataA &v, const mglDataA &w, const char *opt="")
{ mgl_data_fill_eq(gr, &u, eq, &v, &w, opt); }
/// Fill complex data by formula with x,y,z in axis range
inline void Fill(mglDataC &u, const char *eq, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, 0, 0, opt); }
/// Same, with one extra data array available to the formula
inline void Fill(mglDataC &u, const char *eq, const mglDataA &v, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, &v, 0, opt); }
/// Same, with two extra data arrays available to the formula
inline void Fill(mglDataC &u, const char *eq, const mglDataA &v, const mglDataA &w, const char *opt="")
{ mgl_datac_fill_eq(gr, &u, eq, &v, &w, opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat for x in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,0,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat for x,y in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,&ydat,0,&vdat,sl,opt); }
/// Fill dat by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range
inline void Refill(mglData &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_data_refill_gr(gr,&dat,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Fill complex dat by interpolated values of vdat parametrically depended on xdat for x in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,0,0,&vdat,sl,opt); }
/// Fill complex dat by interpolated values of vdat parametrically depended on xdat,ydat for x,y in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &vdat, long sl=-1, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,&ydat,0,&vdat,sl,opt); }
/// Fill complex dat by interpolated values of vdat parametrically depended on xdat,ydat,zdat for x,y,z in axis range
inline void Refill(mglDataC &dat, const mglDataA &xdat, const mglDataA &ydat, const mglDataA &zdat, const mglDataA &vdat, const char *opt="")
{ mgl_datac_refill_gr(gr,&dat,&xdat,&ydat,&zdat,&vdat,-1,opt); }
/// Set the data by triangulated surface values assuming x,y,z in axis range
inline void DataGrid(mglData &d, const mglDataA &x, const mglDataA &y, const mglDataA &z, const char *opt="")
{ mgl_data_grid(gr,&d,&x,&y,&z,opt); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_x(gr, &x, &a, opt)); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &y, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_xy(gr, &x, &y, &a, opt)); }
/// Make histogram (distribution) of data. This function do not plot data.
/** Option "value" sets the size of output array (default is mglFitPnts=100). */
inline mglData Hist(const mglDataA &x, const mglDataA &y, const mglDataA &z, const mglDataA &a, const char *opt="")
{ return mglData(true, mgl_hist_xyz(gr, &x, &y, &z, &a, opt)); }
inline void Compression(bool){} // NOTE: Add later -- IDTF
/// Set the preference for vertex color on/off (for formats that support it, now only PRC does).
inline void VertexColor(bool enable) { mgl_set_flag(gr,enable, MGL_PREFERVC); }
/// Render only front side of surfaces for debugging purposes (for formats that support it, now only PRC does).
/// Note the inversion: DoubleSided(false) SETS the MGL_ONESIDED flag.
inline void DoubleSided(bool enable) { mgl_set_flag(gr,!enable, MGL_ONESIDED); }
// inline void TextureColor(bool){} // NOTE: Add later -- IDTF
};
//-----------------------------------------------------------------------------
/// Wrapper class for MGL parsing.
/// Holds a reference-counted HMPR parser handle: copies bump the count via
/// mgl_use_parser() and the destructor deletes the parser when the last
/// reference is released.
class MGL_EXPORT mglParse
{
HMPR pr;
// Private copy-assignment: adopts p's handle and bumps its refcount.
// NOTE(review): returns `p` instead of *this and never releases the handle
// previously held by this object — unreachable from outside the class, but
// confirm this is intentional before making it public.
mglParse &operator=(mglParse &p)
{ pr = p.pr; mgl_use_parser(pr,1); return p; }
public:
/// Wrap an existing parser handle (shares ownership).
mglParse(HMPR p) { pr = p; mgl_use_parser(pr,1); }
/// Copy constructor: shares the parser handle (refcounted).
mglParse(mglParse &p) { pr = p.pr; mgl_use_parser(pr,1); }
/// Create a fresh parser; setsize controls whether scripts may resize the picture.
mglParse(bool setsize=false)
{ pr=mgl_create_parser(); mgl_parser_allow_setsize(pr, setsize); }
/// Drop our reference; delete the parser when no references remain.
virtual ~mglParse()
{
#pragma omp critical
if(mgl_use_parser(pr,-1)<1) mgl_delete_parser(pr);
}
/// Get pointer to internal mglParser object
inline HMPR Self() { return pr; }
/// Parse and draw single line of the MGL script
inline int Parse(mglGraph *gr, const char *str, int pos)
{ return mgl_parse_line(gr->Self(), pr, str, pos); }
/// Wide-character variant of Parse()
inline int Parse(mglGraph *gr, const wchar_t *str, int pos)
{ return mgl_parse_linew(gr->Self(), pr, str, pos); }
/// Execute MGL script text with '\n' separated lines
inline void Execute(mglGraph *gr, const char *str)
{ mgl_parse_text(gr->Self(), pr, str); }
/// Wide-character variant of Execute()
inline void Execute(mglGraph *gr, const wchar_t *str)
{ mgl_parse_textw(gr->Self(), pr, str); }
/// Execute and draw script from the file
inline void Execute(mglGraph *gr, FILE *fp, bool print=false)
{ mgl_parse_file(gr->Self(), pr, fp, print); }
/// Return type of command: 0 - not found, 1 - other data plot, 2 - func plot,
/// 3 - setup, 4 - data handle, 5 - data create, 6 - subplot, 7 - program
/// 8 - 1d plot, 9 - 2d plot, 10 - 3d plot, 11 - dd plot, 12 - vector plot
/// 13 - axis, 14 - primitives, 15 - axis setup, 16 - text/legend, 17 - data transform
inline int CmdType(const char *name)
{ return mgl_parser_cmd_type(pr, name); }
/// Return string of command format (command name and its argument[s])
inline const char *CmdFormat(const char *name)
{ return mgl_parser_cmd_frmt(pr, name); }
/// Return description of MGL command
inline const char *CmdDesc(const char *name)
{ return mgl_parser_cmd_desc(pr, name); }
/// Get name of command with number n
inline const char *GetCmdName(long n)
{ return mgl_parser_cmd_name(pr,n); }
/// Get number of defined commands
inline long GetCmdNum()
{ return mgl_parser_cmd_num(pr); }
/// Load new commands from external dynamic Library (must have "const mglCommand *mgl_cmd_extra" variable)
inline void LoadDLL(const char *fname)
{ mgl_parser_load(pr, fname); }
/// Apply one step for equation d vars[i]/dt = eqs[i] using Runge-Kutta method
inline void RK_Step(const char *eqs, const char *vars, mreal dt=1)
{ mgl_rk_step(pr, eqs, vars, dt); }
/// Wide-character variant of RK_Step()
inline void RK_Step(const wchar_t *eqs, const wchar_t *vars, mreal dt=1)
{ mgl_rk_step_w(pr, eqs, vars, dt); }
/// Open all data arrays from HDF file and assign it as variables of parser p
inline void OpenHDF(const char *fname)
{ mgl_parser_openhdf(pr, fname); }
/// Set value for parameter $N
inline void AddParam(int id, const char *str)
{ mgl_parser_add_param(pr, id, str); }
/// Wide-character variant of AddParam()
inline void AddParam(int id, const wchar_t *str)
{ mgl_parser_add_paramw(pr, id, str); }
/// Restore once flag
inline void RestoreOnce() { mgl_parser_restore_once(pr); }
/// Allow changing size of the picture
inline void AllowSetSize(bool allow) { mgl_parser_allow_setsize(pr, allow); }
/// Allow reading/saving files
inline void AllowFileIO(bool allow) { mgl_parser_allow_file_io(pr, allow); }
/// Allow loading commands from external libraries
inline void AllowDllCall(bool allow) { mgl_parser_allow_dll_call(pr, allow); }
/// Set flag to stop script parsing
inline void Stop() { mgl_parser_stop(pr); }
/// Set variant of argument(s) separated by '?' to be used in further commands
inline void SetVariant(int var=0)
{ mgl_parser_variant(pr, var); }
/// Set starting object ID
inline void StartID(int id=0)
{ mgl_parser_start_id(pr, id); }
/// Return result of formula evaluation
inline mglData Calc(const char *formula)
{ return mglData(true,mgl_parser_calc(pr,formula)); }
/// Wide-character variant of Calc()
inline mglData Calc(const wchar_t *formula)
{ return mglData(true,mgl_parser_calcw(pr,formula)); }
/// Return result of formula evaluation as complex data
inline mglDataC CalcComplex(const char *formula)
{ return mglDataC(true,mgl_parser_calc_complex(pr,formula)); }
/// Wide-character variant of CalcComplex()
inline mglDataC CalcComplex(const wchar_t *formula)
{ return mglDataC(true,mgl_parser_calc_complexw(pr,formula)); }
/// Find variable with given name or add a new one
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglDataA *AddVar(const char *name)
{ return mgl_parser_add_var(pr, name); }
/// Wide-character variant of AddVar()
inline mglDataA *AddVar(const wchar_t *name)
{ return mgl_parser_add_varw(pr, name); }
/// Find variable with given name or return NULL if no one
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglDataA *FindVar(const char *name)
{ return mgl_parser_find_var(pr, name); }
/// Wide-character variant of FindVar()
inline mglDataA *FindVar(const wchar_t *name)
{ return mgl_parser_find_varw(pr, name); }
/// Get variable with given id. Can be NULL for temporary ones.
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglDataA *GetVar(unsigned long id)
{ return mgl_parser_get_var(pr,id); }
/// Get number of variables
inline long GetNumVar()
{ return mgl_parser_num_var(pr); }
/// Delete variable with name
inline void DeleteVar(const char *name) { mgl_parser_del_var(pr, name); }
/// Wide-character variant of DeleteVar()
inline void DeleteVar(const wchar_t *name) { mgl_parser_del_varw(pr, name); }
/// Delete all data variables
void DeleteAll() { mgl_parser_del_all(pr); }
/// Get constant with given id. Can be NULL if not found.
/// NOTE !!! You must not delete obtained data arrays !!!
inline mglNum *GetConst(unsigned long id)
{ return mgl_parser_get_const(pr,id); }
/// Get number of constants
inline long GetNumConst()
{ return mgl_parser_num_const(pr); }
};
//-----------------------------------------------------------------------------
#endif
#endif
// -----------------------------------------------------------------------------
//
// "CAPIPrecis"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : memcpy.c
// Create : 2019-09-28 14:41:30
// Revise : 2019-11-29 11:17:40
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "mt19937.h"
#include "timer.h"
#include "myMalloc.h"
#include "config.h"
//CAPI
#include "libcxl.h"
#include "capienv.h"
#include "memcpy-tutorial.h"
/// Allocate the tutorial data container and its two uint32_t buffers
/// (array_send / array_receive), each of arguments->size elements.
/// NOTE(review): no NULL checks here — presumably my_malloc() aborts or
/// validates internally; confirm against myMalloc.h.
struct DataArraysTut *newDataArraysTut(struct Arguments *arguments){
struct DataArraysTut *dataArraysTut = (struct DataArraysTut *) my_malloc(sizeof(struct DataArraysTut));
dataArraysTut->size = arguments->size;
dataArraysTut->array_send = (uint32_t *) my_malloc(sizeof(uint32_t)* (dataArraysTut->size));
dataArraysTut->array_receive = (uint32_t *) my_malloc(sizeof(uint32_t)* (dataArraysTut->size));
return dataArraysTut;
}
/// Release the container and both buffers; a NULL container is a no-op.
/// NOTE(review): buffers are allocated with my_malloc() but released with
/// free() — confirm my_malloc is plain-malloc compatible (no header prefix).
void freeDataArraysTut(struct DataArraysTut *dataArraysTut){
if(dataArraysTut){
if(dataArraysTut->array_send)
free(dataArraysTut->array_send);
if(dataArraysTut->array_receive)
free(dataArraysTut->array_receive);
free(dataArraysTut);
}
}
// Initialize both tutorial buffers in parallel: the send buffer holds the
// identity pattern (array_send[i] == i, truncated to 32 bits) and the
// receive buffer is cleared so a later compare can detect the AFU copy.
void initializeDataArraysTut(struct DataArraysTut *dataArraysTut)
{
    uint64_t idx;

    #pragma omp parallel for
    for (idx = 0; idx < dataArraysTut->size; idx++)
    {
        dataArraysTut->array_receive[idx] = 0;
        dataArraysTut->array_send[idx]    = (uint32_t)idx;
    }
}
void copyDataArraysTut(struct DataArraysTut *dataArraysTut, struct Arguments *arguments){
// uint64_t i;
// #pragma omp parallel for
// for(i = 0; i < dataArraysTut->size; i++)
// {
// //generate READ_CL_NA array_send[i] // read engine
// //generate WRITE_CL array_receive[i] // write engine
// dataArraysTut->array_receive[i] = dataArraysTut->array_send[i];
// }
struct cxl_afu_h *afu;
// ********************************************************************************************
// *************** MAP CSR DataStructure **************
// ********************************************************************************************
struct WEDStructTut *wed = mapDataArraysTutToWED(dataArraysTut);
// ********************************************************************************************
// *************** Setup AFU **************
// ********************************************************************************************
setupAFUTut(&afu, wed);
struct AFUStatus afu_status = {0};
afu_status.afu_config = arguments->afu_config;
afu_status.afu_config_2 = arguments->afu_config_2;
afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
afu_status.cu_config = ((afu_status.cu_config << 24) | (arguments->numThreads));
afu_status.cu_config_2 = afu_status.cu_config_2;
afu_status.cu_config_3 = 1 ;
afu_status.cu_config_4 = 1 ;
afu_status.cu_stop = wed->size_send;
startAFU(&afu, &afu_status);
// ********************************************************************************************
// *************** START AFU **************
// ********************************************************************************************
startCU(&afu, &afu_status);
// ********************************************************************************************
// *************** WAIT AFU **************
// ********************************************************************************************
waitAFU(&afu, &afu_status);
printMMIO_error(afu_status.error);
releaseAFU(&afu);
free(wed);
}
// Count positions where the received buffer differs from the sent buffer.
// Returns 0 when the (hardware) copy reproduced the data exactly.
uint64_t compareDataArraysTut(struct DataArraysTut *dataArraysTut)
{
    uint64_t mismatches = 0;
    uint64_t j;

    #pragma omp parallel for shared(dataArraysTut) reduction(+: mismatches)
    for (j = 0; j < dataArraysTut->size; j++)
    {
        const uint32_t sent     = dataArraysTut->array_send[j];
        const uint32_t received = dataArraysTut->array_receive[j];
        if (received != sent)
        {
            // printf("[%llu] %u != %u\n", j, received, sent);
            mismatches++;
        }
    }
    return mismatches;
}
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffOneBodyJastrowOrbital.h"
#include <qmc_common.h>
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for one-body Jastrow function using multiple functors
*/
template<class FT>
struct J1OrbitalSoA : public WaveFunctionComponent
{
///alias FuncType
using FuncType = FT;
///type of each component U, dU, d2U;
using valT = typename FT::real_type;
///element position type
using posT = TinyVector<valT, OHMMS_DIM>;
///use the same container
using RowContainer = DistanceTableData::RowContainer;
///table index
const int myTableID;
///number of ions
int Nions;
///number of electrons
int Nelec;
///number of groups
int NumGroups;
///reference to the sources (ions)
const ParticleSet& Ions;
valT curAt;
valT curLap;
posT curGrad;
///\f$Vat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Vat;
aligned_vector<valT> U, dU, d2U, d3U;
aligned_vector<valT> DistCompressed;
aligned_vector<int> DistIndice;
Vector<posT> Grad;
Vector<valT> Lap;
///Container for \f$F[ig*NumGroups+jg]\f$
std::vector<FT*> F;
/// Construct the one-body Jastrow: register an electron-ion distance table
/// (SoA layout) on els and size all internal storage via initialize().
J1OrbitalSoA(const ParticleSet& ions, ParticleSet& els)
: myTableID(els.addTable(ions, DT_SOA)), Ions(ions)
{
initialize(els);
ClassName = "J1OrbitalSoA";
}
/// Non-copyable: F holds owning raw pointers to the radial functors.
J1OrbitalSoA(const J1OrbitalSoA& rhs) = delete;
/// Destructor: releases the owned per-species functors.
~J1OrbitalSoA()
{
for (int i = 0; i < F.size(); ++i)
if (F[i] != nullptr)
delete F[i];
}
/* initialize storage for Nions ions, Nelec electrons and the species functors */
void initialize(const ParticleSet& els)
{
Nions = Ions.getTotalNum();
NumGroups = Ions.getSpeciesSet().getTotalNum();
// At least 4 functor slots are reserved regardless of the species count.
// NOTE(review): the reason for the minimum of 4 is not visible here — confirm.
F.resize(std::max(NumGroups, 4), nullptr);
// NumGroups = 0 marks ions that are NOT stored grouped by species.
// NOTE(review): the consuming code paths are outside this view; presumably
// they fall back to per-ion (GroupID-based) functor lookup — confirm.
if (NumGroups > 1 && !Ions.IsGrouped)
{
NumGroups = 0;
}
Nelec = els.getTotalNum();
// Per-electron accumulators: value, gradient and laplacian contributions.
Vat.resize(Nelec);
Grad.resize(Nelec);
Lap.resize(Nelec);
// Per-ion scratch: u, du/dr, d2u/dr2, d3u/dr3 plus compressed distance buffers.
U.resize(Nions);
dU.resize(Nions);
d2U.resize(Nions);
d3U.resize(Nions);
DistCompressed.resize(Nions);
DistIndice.resize(Nions);
}
/// Install the radial functor for ion species source_type; takes ownership
/// and frees any functor previously registered for that species.
/// @param target_type unused here — NOTE(review): presumably kept for
/// signature compatibility with the two-body Jastrow; confirm.
void addFunc(int source_type, FT* afunc, int target_type = -1)
{
if (F[source_type] != nullptr)
delete F[source_type];
F[source_type] = afunc;
}
void recompute(ParticleSet& P)
{
const DistanceTableData& d_ie(P.getDistTable(myTableID));
for (int iat = 0; iat < Nelec; ++iat)
{
computeU3(P, iat, d_ie.Distances[iat]);
Vat[iat] = simd::accumulate_n(U.data(), Nions, valT());
Lap[iat] = accumulateGL(dU.data(), d2U.data(), d_ie.Displacements[iat], Grad[iat]);
}
}
LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
{
evaluateGL(P, G, L, true);
return LogValue;
}
void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
const DistanceTableData& d_ie(P.getDistTable(myTableID));
valT dudr, d2udr2;
Tensor<valT, DIM> ident;
grad_grad_psi = 0.0;
ident.diagonal(1.0);
for (int iel = 0; iel < Nelec; ++iel)
{
const valT* dist = d_ie.Distances[iel];
const RowContainer& displ = d_ie.Displacements[iel];
for (int iat = 0; iat < Nions; iat++)
{
int gid = Ions.GroupID[iat];
auto* func = F[gid];
if( func != nullptr)
{
RealType r = dist[iat];
RealType rinv = 1.0 / r;
PosType dr = displ[iat];
func->evaluate(r, dudr, d2udr2);
grad_grad_psi[iel] -= rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
}
}
}
}
ValueType ratio(ParticleSet& P, int iat)
{
UpdateMode = ORB_PBYP_RATIO;
curAt = computeU(P.getDistTable(myTableID).Temp_r.data());
return std::exp(Vat[iat] - curAt);
}
inline void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
for (int k = 0; k < ratios.size(); ++k)
ratios[k] = std::exp(Vat[VP.refPtcl] - computeU(VP.getDistTable(myTableID).Distances[k]));
}
inline valT computeU(const valT* dist)
{
valT curVat(0);
if (NumGroups > 0)
{
for (int jg = 0; jg < NumGroups; ++jg)
{
if (F[jg] != nullptr)
curVat += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist, DistCompressed.data());
}
}
else
{
for (int c = 0; c < Nions; ++c)
{
int gid = Ions.GroupID[c];
if (F[gid] != nullptr)
curVat += F[gid]->evaluate(dist[c]);
}
}
return curVat;
}
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
const valT* restrict dist = P.getDistTable(myTableID).Temp_r.data();
curAt = valT(0);
if (NumGroups > 0)
{
for (int jg = 0; jg < NumGroups; ++jg)
{
if (F[jg] != nullptr)
curAt += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist, DistCompressed.data());
}
}
else
{
for (int c = 0; c < Nions; ++c)
{
int gid = Ions.GroupID[c];
if (F[gid] != nullptr)
curAt += F[gid]->evaluate(dist[c]);
}
}
for (int i = 0; i < Nelec; ++i)
ratios[i] = std::exp(Vat[i] - curAt);
}
inline void evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L,
bool fromscratch = false)
{
if (fromscratch)
recompute(P);
for (size_t iat = 0; iat < Nelec; ++iat)
G[iat] += Grad[iat];
for (size_t iat = 0; iat < Nelec; ++iat)
L[iat] -= Lap[iat];
LogValue = -simd::accumulate_n(Vat.data(), Nelec, valT());
}
/** compute gradient and lap
* @return lap
*/
inline valT accumulateGL(const valT* restrict du,
const valT* restrict d2u,
const RowContainer& displ,
posT& grad) const
{
valT lap(0);
constexpr valT lapfac = OHMMS_DIM - RealType(1);
//#pragma omp simd reduction(+:lap)
for (int jat = 0; jat < Nions; ++jat)
lap += d2u[jat] + lapfac * du[jat];
for (int idim = 0; idim < OHMMS_DIM; ++idim)
{
const valT* restrict dX = displ.data(idim);
valT s = valT();
//#pragma omp simd reduction(+:s)
for (int jat = 0; jat < Nions; ++jat)
s += du[jat] * dX[jat];
grad[idim] = s;
}
return lap;
}
/** compute U, dU and d2U
* @param P quantum particleset
* @param iat the moving particle
* @param dist starting address of the distances of the ions wrt the iat-th particle
*/
inline void computeU3(ParticleSet& P, int iat, const valT* dist)
{
if (NumGroups > 0)
{ //ions are grouped
constexpr valT czero(0);
std::fill_n(U.data(), Nions, czero);
std::fill_n(dU.data(), Nions, czero);
std::fill_n(d2U.data(), Nions, czero);
for (int jg = 0; jg < NumGroups; ++jg)
{
if (F[jg] == nullptr)
continue;
F[jg]->evaluateVGL(-1,
Ions.first(jg),
Ions.last(jg),
dist,
U.data(),
dU.data(),
d2U.data(),
DistCompressed.data(),
DistIndice.data());
}
}
else
{
for (int c = 0; c < Nions; ++c)
{
int gid = Ions.GroupID[c];
if (F[gid] != nullptr)
{
U[c] = F[gid]->evaluate(dist[c], dU[c], d2U[c]);
dU[c] /= dist[c];
}
}
}
}
/** compute the gradient during particle-by-particle update
* @param P quantum particleset
* @param iat particle index
*/
GradType evalGrad(ParticleSet& P, int iat) { return GradType(Grad[iat]); }
/** compute the gradient during particle-by-particle update
* @param P quantum particleset
* @param iat particle index
*
* Using Temp_r. curAt, curGrad and curLap are computed.
*/
ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
UpdateMode = ORB_PBYP_PARTIAL;
computeU3(P, iat, P.getDistTable(myTableID).Temp_r.data());
curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).Temp_dr, curGrad);
curAt = simd::accumulate_n(U.data(), Nions, valT());
grad_iat += curGrad;
return std::exp(Vat[iat] - curAt);
}
/** Rejected move. Nothing to do */
inline void restore(int iat) {}
/** Accpted move. Update Vat[iat],Grad[iat] and Lap[iat] */
void acceptMove(ParticleSet& P, int iat)
{
if (UpdateMode == ORB_PBYP_RATIO)
{
computeU3(P, iat, P.getDistTable(myTableID).Temp_r.data());
curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).Temp_dr, curGrad);
}
LogValue += Vat[iat] - curAt;
Vat[iat] = curAt;
Grad[iat] = curGrad;
Lap[iat] = curLap;
}
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
if (Bytes_in_WFBuffer == 0)
{
Bytes_in_WFBuffer = buf.current();
buf.add(Vat.begin(), Vat.end());
buf.add(Grad.begin(), Grad.end());
buf.add(Lap.begin(), Lap.end());
Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
// free local space
Vat.free();
Grad.free();
Lap.free();
}
else
{
buf.forward(Bytes_in_WFBuffer);
}
}
inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
evaluateGL(P, P.G, P.L, false);
buf.forward(Bytes_in_WFBuffer);
return LogValue;
}
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
Vat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
Grad.attachReference(buf.lendReference<posT>(Nelec), Nelec);
Lap.attachReference(buf.lendReference<valT>(Nelec), Nelec);
}
WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const
{
J1OrbitalSoA<FT>* j1copy = new J1OrbitalSoA<FT>(Ions, tqp);
j1copy->Optimizable = Optimizable;
for (size_t i = 0, n = F.size(); i < n; ++i)
{
if (F[i] != nullptr)
j1copy->addFunc(i, new FT(*F[i]));
}
if (dPsi)
{
j1copy->dPsi = dPsi->makeClone(tqp);
}
return j1copy;
}
/**@{ WaveFunctionComponent virtual functions that are not essential for the development */
void resetTargetParticleSet(ParticleSet& P) {}
void reportStatus(std::ostream& os)
{
for (size_t i = 0, n = F.size(); i < n; ++i)
{
if (F[i] != nullptr)
F[i]->myVars.print(os);
}
}
void checkInVariables(opt_variables_type& active)
{
myVars.clear();
for (size_t i = 0, n = F.size(); i < n; ++i)
{
if (F[i] != nullptr)
{
F[i]->checkInVariables(active);
F[i]->checkInVariables(myVars);
}
}
}
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
Optimizable = myVars.is_optimizable();
for (size_t i = 0, n = F.size(); i < n; ++i)
if (F[i] != nullptr)
F[i]->checkOutVariables(active);
if (dPsi)
dPsi->checkOutVariables(active);
}
void resetParameters(const opt_variables_type& active)
{
if (!Optimizable)
return;
for (size_t i = 0, n = F.size(); i < n; ++i)
if (F[i] != nullptr)
F[i]->resetParameters(active);
for (int i = 0; i < myVars.size(); ++i)
{
int ii = myVars.Index[i];
if (ii >= 0)
myVars[i] = active[ii];
}
if (dPsi)
dPsi->resetParameters(active);
}
/**@} */
inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc)
{
GradType g_return(0.0);
const DistanceTableData& d_ie(P.getDistTable(myTableID));
for (int iat = 0; iat < Nelec; ++iat)
{
const valT* dist = d_ie.Distances[iat];
const RowContainer& displ = d_ie.Displacements[iat];
int gid = Ions.GroupID[isrc];
RealType r = dist[isrc];
RealType rinv = 1.0 / r;
PosType dr = displ[isrc];
if (F[gid] != nullptr)
{
U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
g_return -= dU[isrc] * rinv * dr;
}
}
return g_return;
}
inline GradType evalGradSource(ParticleSet& P,
ParticleSet& source,
int isrc,
TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
{
GradType g_return(0.0);
const DistanceTableData& d_ie(P.getDistTable(myTableID));
for (int iat = 0; iat < Nelec; ++iat)
{
const valT* dist = d_ie.Distances[iat];
const RowContainer& displ = d_ie.Displacements[iat];
int gid = Ions.GroupID[isrc];
RealType r = dist[isrc];
RealType rinv = 1.0 / r;
PosType dr = displ[isrc];
if (F[gid] != nullptr)
{
U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
}
else
{
APP_ABORT("J1OrbitalSoa::evaluateGradSource: F[gid]==nullptr")
}
g_return -= dU[isrc] * rinv * dr;
//The following terms depend only on the radial component r. Thus,
//we compute them and mix with position vectors to acquire the full
//cartesian vector objects.
valT grad_component = (d2U[isrc] - dU[isrc] * rinv);
valT lapl_component = d3U[isrc] + 2 * rinv * grad_component;
for (int idim = 0; idim < OHMMS_DIM; idim++)
{
grad_grad[idim][iat] += dr[idim] * dr * rinv * rinv * grad_component;
grad_grad[idim][iat][idim] += rinv * dU[isrc];
lapl_grad[idim][iat] -= lapl_component * rinv * dr[idim];
}
}
return g_return;
}
};
} // namespace qmcplusplus
#endif
|
vx_lut.c | /*
* Copyright (c) 2012-2014 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
/*!
* \file
* \brief The TableLookup Kernel.
* \author Erik Rainey <erik.rainey@gmail.com>
*/
#include <VX/vx.h>
#include <VX/vx_helper.h>
#include <omp.h>
#include <vx_internal.h>
#include <math.h>
/*! \brief Applies the LUT to every pixel of the source image.
 * Fixes vs. the previous version:
 *  - the OpenMP chunk size was computed concurrently by every thread inside
 *    the parallel region (a data race on a shared int) and could be 0 when
 *    dim_y < TARGET_NUM_CORES; schedule(dynamic, 0) is invalid. It is now
 *    computed once, before the region, and clamped to >= 1.
 *  - an access failure used to return early and leak any patch that had
 *    already been accessed; the commits now always run.
 * NOTE(review): the table is indexed with a vx_uint8 pixel, i.e. only the
 * VX_DF_IMAGE_U8 case is actually handled although the input validator also
 * accepts VX_DF_IMAGE_S16 — confirm against the kernel requirements.
 */
static vx_status vxTableLookupKernel(vx_node node, const vx_reference *parameters, vx_uint32 num)
{
vx_status status = VX_ERROR_INVALID_PARAMETERS;
if (num == 3)
{
vx_image src_image = (vx_image) parameters[0];
vx_lut lut = (vx_lut)parameters[1];
vx_image dst_image = (vx_image) parameters[2];
int chunk;
vx_int32 x, y;
void *src = NULL, *dst = NULL;
vx_imagepatch_addressing_t src_addr;
vx_imagepatch_addressing_t dst_addr;
vx_rectangle_t rect;
vx_uint8 *table = NULL, *out, *ptr;
status = VX_SUCCESS;
status |= vxGetValidRegionImage(src_image, &rect);
status |= vxAccessImagePatch(src_image, &rect, 0, &src_addr, &src, VX_READ_AND_WRITE);
status |= vxAccessLUT(lut, (void **)&table, VX_READ_AND_WRITE);
status |= vxAccessImagePatch(dst_image, &rect, 0, &dst_addr, &dst, VX_READ_AND_WRITE);
if (status == VX_SUCCESS)
{
/* chunk size for the dynamic schedule: computed once, never zero */
chunk = src_addr.dim_y / TARGET_NUM_CORES;
if (chunk < 1)
chunk = 1;
#pragma omp parallel for schedule(dynamic, chunk) private(x, ptr, out)
for (y = 0; y < (vx_int32)src_addr.dim_y; y++)
{
for (x = 0; x < (vx_int32)src_addr.dim_x; x++)
{
ptr = vxFormatImagePatchAddress2d(src, x, y, &src_addr);
out = vxFormatImagePatchAddress2d(dst, x, y, &dst_addr);
if (ptr && out)
*out = table[*ptr];
}
}
}
/* always commit: releases whichever patches were successfully accessed;
 * a commit with a NULL base pointer reports its own error status */
status |= vxCommitImagePatch(src_image, NULL, 0, &src_addr, src);
status |= vxCommitLUT(lut, table);
status |= vxCommitImagePatch(dst_image, &rect, 0, &dst_addr, dst);
}
return status;
}
/*! \brief Validates the kernel inputs: parameter 0 must be a U8 or S16 image,
 * parameter 1 a UINT8 or INT16 LUT.
 * Fix: `vxReleaseParameter(&param)` had been corrupted into the mojibake
 * `vxReleaseParameter(¶m)` (HTML entity &para; for "&para"); both call
 * sites are restored, matching the intact pattern in the output validator.
 */
static vx_status vxTableLookupInputValidator(vx_node node, vx_uint32 index)
{
vx_status status = VX_ERROR_INVALID_PARAMETERS;
if (index == 0)
{
vx_image input = 0;
vx_parameter param = vxGetParameterByIndex(node, index);
vxQueryParameter(param, VX_PARAMETER_ATTRIBUTE_REF, &input, sizeof(input));
if (input)
{
vx_df_image format = 0;
vxQueryImage(input, VX_IMAGE_ATTRIBUTE_FORMAT, &format, sizeof(format));
if (format == VX_DF_IMAGE_U8 || format == VX_DF_IMAGE_S16)
{
status = VX_SUCCESS;
}
vxReleaseImage(&input);
}
vxReleaseParameter(&param);
}
else if (index == 1)
{
vx_parameter param = vxGetParameterByIndex(node, index);
vx_lut lut = 0;
vxQueryParameter(param, VX_PARAMETER_ATTRIBUTE_REF, &lut, sizeof(lut));
if (lut)
{
vx_enum type = 0;
vxQueryLUT(lut, VX_LUT_ATTRIBUTE_TYPE, &type, sizeof(type));
if (type == VX_TYPE_UINT8 || type == VX_TYPE_INT16)
{
status = VX_SUCCESS;
}
vxReleaseLUT(&lut);
}
vxReleaseParameter(&param);
}
return status;
}
/*! \brief Describes the output image meta-data: same size and same format as
 * the input image.
 * Fixes: the WIDTH query passed sizeof(height) instead of sizeof(width)
 * (harmless today only because the two types match), and the format was
 * hard-coded to VX_DF_IMAGE_U8 even though the input validator accepts S16
 * and the comment promises "equal type and size" — the input format is now
 * propagated.
 */
static vx_status vxTableLookupOutputValidator(vx_node node, vx_uint32 index, vx_meta_format_t *ptr)
{
vx_status status = VX_ERROR_INVALID_PARAMETERS;
if (index == 2)
{
vx_parameter src_param = vxGetParameterByIndex(node, 0);
if (src_param)
{
vx_image src = 0;
vxQueryParameter(src_param, VX_PARAMETER_ATTRIBUTE_REF, &src, sizeof(src));
if (src)
{
vx_uint32 width = 0, height = 0;
vx_df_image format = VX_DF_IMAGE_U8;
vxQueryImage(src, VX_IMAGE_ATTRIBUTE_WIDTH, &width, sizeof(width));
vxQueryImage(src, VX_IMAGE_ATTRIBUTE_HEIGHT, &height, sizeof(height));
vxQueryImage(src, VX_IMAGE_ATTRIBUTE_FORMAT, &format, sizeof(format));
/* output is equal type and size */
ptr->type = VX_TYPE_IMAGE;
ptr->dim.image.format = format;
ptr->dim.image.width = width;
ptr->dim.image.height = height;
status = VX_SUCCESS;
vxReleaseImage(&src);
}
vxReleaseParameter(&src_param);
}
}
return status;
}
/*! \brief Parameter signature of the TableLookup kernel:
 * [0] input image, [1] lookup table, [2] output image — all required. */
static vx_param_description_t lut_kernel_params[] = {
{VX_INPUT, VX_TYPE_IMAGE, VX_PARAMETER_STATE_REQUIRED},
{VX_INPUT, VX_TYPE_LUT, VX_PARAMETER_STATE_REQUIRED},
{VX_OUTPUT,VX_TYPE_IMAGE, VX_PARAMETER_STATE_REQUIRED},
};
/*! \brief Kernel description registered with the framework: enum id, unique
 * name, entry point, parameter list, validators, and (unused) init/deinit. */
vx_kernel_description_t lut_kernel = {
VX_KERNEL_TABLE_LOOKUP,
"org.khronos.openvx.table_lookup",
vxTableLookupKernel,
lut_kernel_params, dimof(lut_kernel_params),
vxTableLookupInputValidator,
vxTableLookupOutputValidator,
NULL,
NULL,
};
|
nlife.c | /* lance un jeu de la vie sur la totalité de la taille de la console */
/* devrait etre plus rapide que l algo naïf */
/* gcc -Wall -O3 nlife.c -lncurses */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <time.h>
#include <string.h>
#ifdef USE_LIBNCURSES
# include <ncurses.h>
#endif
#ifdef USE_LIBCACA
# include <caca.h>
/* libcaca - 0.x API */
# ifdef CACA_API_VERSION_1
# include <caca0.h>
# endif
#endif
#ifdef USE_SDL
# include <SDL.h>
#endif
#ifdef USE_OMP
#include <omp.h>
#endif
/* lifegame state */
time_t start_time; /* start of the run, for the statistics report */
int nobench = 1; /* 1 = interactive display, 0 = benchmark mode (-b) */
unsigned long long nbgenerations; /* generations computed so far, for the stats */
unsigned long long nbgenerationsmax = 0; /* stop after this many generations (0 = never) */
unsigned int lines=100, rows=100; /* world size; also reported by finish() */
unsigned int *world1, *world2; /* kept global so finish() can free them */
unsigned int prob = 5; /* a cell is alive with probability 1/prob at world creation */
char * filename = NULL; /* optional pattern file given with -l */
int init_rand_world(unsigned int* world, unsigned int tlines, unsigned int trows);
int load_file(char * file, unsigned int* world, unsigned int tlines, unsigned int trows);
void finish(int);
/* display backend (ncurses / libcaca / SDL, selected at compile time) */
void aff_init(unsigned int * tlines, unsigned int * trows);
void aff_refresh(unsigned int *world , unsigned int tlines, unsigned int trows);
void aff_finish();
/* command-line parsing */
extern char *optarg;
/* Entry point: parse options, initialise the display backend, build the
 * initial world, then loop over generations forever; Ctrl-C or -g ends the
 * run through finish().
 * Fixes: getopt() returns an int, so the option variable is now an int
 * compared against -1 (the old `(char)getopt(...) != (char)EOF` is not
 * portable); calloc() arguments were swapped (nmemb and size); allocation
 * failure is now detected instead of segfaulting later. */
int main(int argc, char * argv[]) {
register unsigned int i,j;
int c; /* getopt() result — must be int to compare against -1 reliably */
long pausetime = 0;
/* command-line parsing */
while((c = getopt (argc, argv, "hbt:p:l:g:L:R:")) != -1)
switch(c) {
case 'h': printf("Usage:\n"
"-t <ms> pause <ms> milliseconds between each generation.\n"
"-p <n> probability to have a living cell somewhere is 1/<n>.\n"
"-b bench, doesn't display anything but stats.\n"
"-l <file> load <file>.\n"
"-g <n> stop after <n> generations.\n"
"-L <n> <n> lines (only with -b).\n"
"-R <n> <n> rows (only with -b).\n"
);
exit(0); break;
case 'b': nobench = 0; break;
case 't': pausetime = atol(optarg)*1000; break;
case 'p': prob = atoi(optarg); break;
case 'l': filename = strdup(optarg); break;
case 'g': nbgenerationsmax = atoll(optarg); break;
case 'L': lines = atoll(optarg); break;
case 'R': rows = atoll(optarg); break;
}
if (!nobench) printf("benchmark in progress, Ctrl-C to abort...\n");
/* the simulation is stopped with Ctrl-C */
signal(SIGINT, finish);
signal(SIGTERM, finish);
/* display initialisation (may replace lines/rows with the screen size) */
if (nobench) aff_init(&lines, &rows);
/* calloc(nmemb, size): zero-initialised worlds */
world1 = calloc(lines*rows, sizeof(unsigned int));
world2 = calloc(lines*rows, sizeof(unsigned int));
if (world1 == NULL || world2 == NULL) {
fprintf(stderr, "cannot allocate the world\n");
exit(1);
}
/* world initialisation */
if (filename == NULL)
init_rand_world(world1, lines, rows);
else
load_file(filename, world1, lines, rows);
start_time = time(NULL);
nbgenerations = 0;
while(1) {
if (nbgenerationsmax && nbgenerations >= nbgenerationsmax) finish(0);
nbgenerations++;
/* display */
if (nobench) aff_refresh(world1, lines, rows);
if (pausetime) usleep(pausetime);
/* next generation: count the neighbours of every living cell into world2 */
#pragma omp parallel for private(j) schedule(dynamic,1)
for(i=0;i<lines;++i)
for(j=0;j<rows;++j) {
if (world1[i*rows+j] == 1) {
/* the world is a torus: add the size before the modulo to avoid
 * negative operands */
unsigned int k = (i-1+lines)%lines *rows + (j-1+rows)%rows;
unsigned int l = (i-1+lines)%lines *rows + (j +rows)%rows;
unsigned int m = (i-1+lines)%lines *rows + (j+1+rows)%rows;
unsigned int n = (i +lines)%lines *rows + (j-1+rows)%rows;
unsigned int o = (i +lines)%lines *rows + (j+1+rows)%rows;
unsigned int x = (i+1+lines)%lines *rows + (j-1+rows)%rows;
unsigned int y = (i+1+lines)%lines *rows + (j +rows)%rows;
unsigned int z = (i+1+lines)%lines *rows + (j+1+rows)%rows;
/* neighbouring rows may belong to other threads: atomic increments */
#pragma omp atomic
world2[k]++;
#pragma omp atomic
world2[l]++;
#pragma omp atomic
world2[m]++;
#pragma omp atomic
world2[n]++;
#pragma omp atomic
world2[o]++;
#pragma omp atomic
world2[x]++;
#pragma omp atomic
world2[y]++;
#pragma omp atomic
world2[z]++;
}
}
/* apply the birth/survival rules from the neighbour counts */
#pragma omp parallel for private(j) schedule(dynamic,1)
for(i=0;i<lines;++i)
for(j=0;j<rows;++j) {
world1[i*rows+j] =
((world1[i*rows+j] == 0 && world2[i*rows+j]==3)
||(world1[i*rows+j] == 1 && ( world2[i*rows+j]==3 || world2[i*rows+j]==2)))
?1:0;
world2[i*rows+j] =0; /* reset to 0 for the next generation */
}
}
}
/********************************************************************/
/* Fill the world with random cells: each cell is alive with probability
 * 1/prob (global). Always returns 1.
 * Fix: the previous version called rand() from inside an OpenMP parallel
 * loop; rand() is not required to be thread-safe and shares hidden state
 * (a data race). Each row now uses its own rand_r() state derived from the
 * pid, which is both thread-safe and deterministic per run. */
int init_rand_world(unsigned int* world, unsigned int tlines, unsigned int trows) {
unsigned int i,j;
unsigned int base_seed = (unsigned int)getpid();
#pragma omp parallel for private(j) schedule(dynamic,1)
for(i=0;i<tlines;++i) {
/* per-row seed: decorrelate rows with a Knuth multiplicative hash */
unsigned int seed = base_seed ^ (i * 2654435761u);
for(j=0;j<trows;++j) {
world[i*trows+j] = (rand_r(&seed)%prob)?0:1 ;
}
}
return 1;
}
/********************************************************************/
/* Load a pattern file into the world, offset by a third of the world size.
 * Format: '#'-prefixed lines are headers (the character after '#' selects the
 * type; only 'P' is handled), '.' is a dead cell, '*' or '1' a living cell,
 * newline starts the next pattern row. Returns 0 on success, 1 if the file
 * cannot be opened. */
int load_file(char * file, unsigned int* world, unsigned int tlines, unsigned int trows) {
/* rather crude parsing */
FILE* stream;
int l = tlines/3, r= trows/3; /* top-left corner of the pattern in the world */
char c;
int type = 'P';
if ( (stream = fopen(file, "r")) == NULL)
return 1;
while ((c = (char)fgetc(stream)) != (char)EOF) {
switch (c) {
case '\n': /* empty line */
break;
case '#':
/* header: remember the type character and skip the rest of the line */
if ((type = fgetc(stream)) != '\n')
while ((c = (char)fgetc(stream)) != '\n' && c != (char)EOF);
break;
default:
do { /* pattern body: consumed to end-of-file from here on */
if (type == 'P') {
if (c == '.')
++r;
else if (c == '\n')
++l, r = trows/3;
else if (c == '*' || c == '1')
/* torus wrap, like the main loop */
++r, world[(l+tlines)%tlines*trows+(r+trows)%trows] = 1;
}
} while ((c = (char)fgetc(stream)) != (char)EOF) ;
}
}
fclose(stream);
printf("import OK\n");
return 0;
}
/********************************************************************/
/* Signal handler (SIGINT/SIGTERM) and normal termination: shut the display
 * down, print the statistics, free the worlds and exit. The int parameter is
 * required by the signal() handler signature and is unused.
 * Fix: printf conversion specifiers now match the argument types
 * (%llu for unsigned long long, %u for unsigned int — the old %lld/%d were
 * undefined behavior for the unsigned counters). */
void finish(int i) {
time_t duration;
if (nobench) aff_finish();
duration = time(NULL)-start_time;
if (duration == 0) duration=1; /* avoid dividing by zero on very short runs */
printf("We are done.\n");
printf("World size: %u x %u\n", lines, rows);
printf("%llu generations, %d seconds (%llu gen/sec, ~ %llu cell/sec)\n",
nbgenerations, (int)duration,
nbgenerations/(unsigned long long)duration, nbgenerations*lines*rows/(unsigned long long)duration);
free(world1);
free(world2);
if (filename != NULL) free(filename);
exit(0);
}
/********************************************************************/
/**************************** affichage *****************************/
#ifdef USE_LIBNCURSES
/* ncurses backend: initialise the screen, set up the two color pairs
 * (normal / living cell) and report the terminal size as the world size. */
void aff_init(unsigned int * tlines, unsigned int * trows) {
initscr(); /* initialize the curses library */
// nonl(); /* tell curses not to do NL->CR/NL on output */
if (has_colors()) {
start_color();
init_pair(1, COLOR_WHITE, COLOR_BLACK);
init_pair(2, COLOR_MAGENTA, COLOR_RED);
}
/* world size */
*tlines = LINES; /* set by ncurses */
*trows = COLS; /* set by ncurses */
/* refresh(); */
}
/********************************************************************/
/* ncurses backend: redraw the whole world — '@' for a living cell, ' ' for a
 * dead one — then push the frame to the terminal. */
void aff_refresh(unsigned int * world, unsigned int tlines, unsigned int trows) {
unsigned int row, col;
for (row = 0; row < tlines; ++row) {
for (col = 0; col < trows; ++col) {
const unsigned int cell = world[row * trows + col];
if (cell == 1) {
attrset(COLOR_PAIR(2) | WA_BOLD);
mvaddch(row, col, '@');
} else if (cell == 0) {
attrset(COLOR_PAIR(1) | WA_NORMAL);
mvaddch(row, col, ' ');
}
}
}
refresh();
}
/********************************************************************/
/* ncurses backend: restore the terminal to its normal state. */
void aff_finish() {
endwin();
}
#endif
#ifdef USE_LIBCACA
/**************************** affichage *****************************/
/* libcaca backend: initialise the library, set the window title and report
 * the canvas size as the world size. */
void aff_init(unsigned int * tlines, unsigned int * trows) {
if(caca_init() < 0)
printf("Unable to initialize cacalib\n");
caca_set_window_title("nlife");
//caca_set_delay(20000);/* FIXME */
caca_set_feature(CACA_BACKGROUND_BLACK);
*tlines = caca_get_height();
*trows = caca_get_width();
}
/********************************************************************/
/* libcaca backend: redraw the whole world ('@' on magenta for a living cell,
 * black space for a dead one) and refresh the canvas. Any key press ends the
 * program through finish(). */
void aff_refresh(unsigned int * world, unsigned int tlines, unsigned int trows) {
unsigned int row, col;
/* exit on any key */
if (caca_get_event(CACA_EVENT_KEY_PRESS))
finish(-5);
for (row = 0; row < tlines; ++row) {
for (col = 0; col < trows; ++col) {
const unsigned int cell = world[row * trows + col];
if (cell == 1) {
caca_set_color(CACA_COLOR_LIGHTRED, CACA_COLOR_MAGENTA);
caca_putchar(col, row, '@');
} else if (cell == 0) {
caca_set_color(CACA_COLOR_BLACK, CACA_COLOR_BLACK);
caca_putchar(col, row, ' ');
}
}
}
caca_refresh();
}
/********************************************************************/
/* libcaca backend: release the library. */
void aff_finish() {
caca_end();
}
#endif
/********************************************************************/
/**************************** affichage *****************************/
#ifdef USE_SDL
SDL_Surface *screen;
/* SDL backend: open a 640x480 8-bit window (one cell per pixel) and report
 * its dimensions as the world size.
 * NOTE(review): LINESSDL is used as the window width and ROWSSDL as the
 * height, so tlines/trows receive height/width respectively — consistent
 * with DrawPixel(screen, j, i) in aff_refresh, but the names are swapped. */
void aff_init(unsigned int * tlines, unsigned int * trows) {
/* unsigned int LINESSDL=1400; */
/* unsigned int ROWSSDL=1050; */
unsigned int LINESSDL=640;
unsigned int ROWSSDL=480;
/*init SDL*/
if (SDL_Init(SDL_INIT_VIDEO) != 0) {
printf("Unable to initialize SDL: %s\n", SDL_GetError());
}
atexit(SDL_Quit);
/*init surface*/
/* screen = SDL_SetVideoMode(LINESSDL, ROWSSDL, 8, SDL_ANYFORMAT | SDL_FULLSCREEN); */
screen = SDL_SetVideoMode(LINESSDL, ROWSSDL, 8, SDL_ANYFORMAT);
if (screen == NULL) {
printf("Unable to set video mode: %s\n", SDL_GetError());
}
/* world size, taken from the SDL surface (not ncurses: stale comment fixed) */
*tlines = ROWSSDL; /* window height */
*trows = LINESSDL; /* window width */
/* refresh(); */
}
/*http://www.libsdl.org/intro.fr/usingvideofr.html*/
/*http://www.libsdl.org/intro.fr/usingvideofr.html*/
/* Write one pixel of color (R,G,B) at (x,y), handling every surface depth.
 * The caller must hold the surface lock if SDL_MUSTLOCK(screen). */
void DrawPixel(SDL_Surface *screen, int x, int y,
Uint8 R, Uint8 G,
Uint8 B)
{
Uint32 color = SDL_MapRGB(screen->format, R, G, B);
switch (screen->format->BytesPerPixel) {
case 1: {
/* 8 bpp */
Uint8 *bufp;
bufp = (Uint8 *)screen->pixels + y*screen->pitch + x;
/* *bufp = (*bufp+(Uint8)color)/2; */
*bufp = color;
}
break;
case 2: {
/* probably 15 or 16 bpp */
Uint16 *bufp;
bufp = (Uint16 *)screen->pixels + y*screen->pitch/2 + x;
/* *bufp = (*bufp+(Uint16)color)/2; */
*bufp = color;
}
break;
case 3: {
/* 24 bpp: slow and rarely used */
Uint8 *bufp;
bufp = (Uint8 *)screen->pixels + y*screen->pitch + x * 3;
if(SDL_BYTEORDER == SDL_LIL_ENDIAN) {
bufp[0] = color;
bufp[1] = color >> 8;
bufp[2] = color >> 16;
} else {
bufp[2] = color;
bufp[1] = color >> 8;
bufp[0] = color >> 16;
}
}
break;
case 4: {
/* then probably 32 bpp */
Uint32 *bufp;
/* blends with the previous pixel value (deliberate fade effect) */
*(bufp = (Uint32 *)screen->pixels + y*screen->pitch/4 + x) = (*bufp/2+color);
/* *bufp = color; */
}
break;
}
}
/********************************************************************/
/* SDL backend: draw the world one pixel per cell (red = alive, black = dead),
 * flip the buffers and quit on any key press. The OpenMP loop is safe because
 * every iteration writes a distinct pixel. */
void aff_refresh(unsigned int * world, unsigned int tlines, unsigned int trows) {
register unsigned int i,j;
if ( SDL_MUSTLOCK(screen) ) {
if ( SDL_LockSurface(screen) < 0 ) {
return;
}
}
#pragma omp parallel for private(j) schedule(dynamic,1)
for(i=0;i<tlines;++i){
for(j=0;j<trows;++j) {
if (world[i*trows+j] == 1){
DrawPixel(screen, j,i, 255,0,0);
}
if (world[i*trows+j] == 0) {
DrawPixel(screen, j,i, 0,0,0);
}
}
}
/* SDL_UpdateRect(screen , 0 , 0 , 0 , 0 ); */
if ( SDL_MUSTLOCK(screen) ) {
SDL_UnlockSurface(screen);
}
SDL_Flip(screen);
SDL_Event event;
if (SDL_PollEvent(&event)&& (event.type==SDL_KEYDOWN)) finish(0);
}
/********************************************************************/
/* SDL backend: shut SDL down (also registered via atexit in aff_init). */
void aff_finish() {
SDL_Quit();
}
#endif
|
test_task_single.c | //===-- test_task_single.cc - Test task barrier of single ---------*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_task.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>
#include "tests.h"
/* Spawn NUM_TASKS sleeping tasks from a single region and record which
 * thread ran each one. Returns non-zero when at least two tasks executed on
 * different threads (i.e. the runtime really distributed the tasks),
 * zero when every task ran on the same thread. */
int test_omp_task(void) {
int thread_of_task[NUM_TASKS];
int t;
#pragma omp parallel
{
#pragma omp single
{
for (t = 0; t < NUM_TASKS; t++) {
/* Snapshot the loop index into a block-local variable: it becomes
 * firstprivate in the task, so a slow task still sees its own id
 * even after the loop has moved on. */
int task_id = t;
printf("Create task %d\n", task_id);
#pragma omp task
{
sleep(SLEEPTIME);
thread_of_task[task_id] = omp_get_thread_num();
printf("Executed task %d in thread %d\n", task_id, omp_get_thread_num());
} /* end of omp task */
} /* end of for */
} /* end of single */
} /*end of parallel */
/* Did any task run on a thread other than the first task's thread? */
for (t = 1; t < NUM_TASKS; t++)
if (thread_of_task[t] != thread_of_task[0])
return 1;
return 0;
}
/* Run test_omp_task() REPETITIONS times. A repetition passes when the tasks
 * were executed by more than one thread (test_omp_task() returns non-zero).
 * Fix: the warning said "Need >2 threads" while the guard tests `< 2`; the
 * message now matches the check. */
int main(void) {
int i;
int num_failed = 0;
if (omp_get_max_threads() < 2) {
printf("Not enough threads for this test! Need at least 2 threads!\n");
}
for (i = 0; i < REPETITIONS; i++) {
if (!test_omp_task()) {
num_failed++;
}
}
return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive 3x3 stride-1 int8 convolution: for every output channel, accumulate
// the int32 dot products of the 3x3 kernel with each input channel.
// Assumes top_blob was sized by the caller so that outw == w - 2 (the r0/r1/r2
// pointers advance by 2 at each row end to skip the unpadded border).
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char *kernel = _kernel;
// one thread per output channel: each writes only its own out0
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
// 9 weights per (output channel, input channel) pair
const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int *outptr0 = out0;
const signed char *img0 = bottom_blob.channel(q);
// three consecutive input rows covered by the 3x3 window
const signed char *r0 = img0;
const signed char *r1 = img0 + w;
const signed char *r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
// widen to int before multiply to avoid signed char overflow
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
// accumulate across input channels (out0 was zero-filled above)
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
// skip the 2-pixel border so the window stays inside the row
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
// Winograd F(2,3) kernel transform: U = G * g * G^T for every
// (output channel, input channel) 3x3 kernel, stored as 16 int16 values.
// ktm is the Winograd G matrix scaled by 2 to stay in integer arithmetic.
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(4*4, inch, outch, 2ul);
// G
const short ktm[4][3] = {
{ 2, 0, 0},
{ 1, 1, 1},
{ 1, -1, 1},
{ 0, 0, 2}
};
// independent per output channel; writes disjoint kernel_tm channels
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h = G * g (4x3 intermediate); the (short) cast widens the whole product
short tmp[4][3];
for (int i=0; i<4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T (4x4 result, row-major into kernel_tm0)
for (int j=0; j<4; j++)
{
short* tmpp = &tmp[j][0];
for (int i=0; i<4; i++)
{
kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(2,3) 3x3 stride-1 convolution on int8 data.
// Stages: pad input to a multiple of the 2x2 output tile, transform each 4x4
// input tile (B^T * d * B), elementwise-multiply/accumulate against the
// pre-transformed kernels (conv3x3s1_winograd23_transform_kernel_int8_sse),
// transform back (A^T * w * A with >>2 undoing the x2 scaling in G), then
// crop the padding off the result.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
// one 16-short (4x4) row per tile per input channel
bottom_blob_tm.create(4*4, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// four consecutive input rows feeding this row of tiles
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[4],d1[4],d2[4],d3[4];
short w0[4],w1[4],w2[4],w3[4];
short t0[4],t1[4],t2[4],t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
}
// U = B_t * d_t (second application of B^T completes B^T*d*B)
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 4] = d1[n];
out_tm0[n+ 8] = d2[n];
out_tm0[n+12] = d3[n];
}
// adjacent 4x4 input tiles overlap by 2 columns (output tile width 2)
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
// int accumulators: 16 values per tile per output channel (elemsize 4)
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// output channels are processed four at a time; the remainder loop
// below handles outch % 4
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int* output1_tm = out1_tm.row<int>(i);
int* output2_tm = out2_tm.row<int>(i);
int* output3_tm = out3_tm.row<int>(i);
int sum0[16] = {0};
int sum1[16] = {0};
int sum2[16] = {0};
int sum3[16] = {0};
// input channels unrolled by 4
int q = 0;
for (; q+3<inch; q+=4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
// stepping k* by one 16-short row reaches the transformed kernel
// of input channels q+1..q+3; the final -= 16*3 restores the
// pointer for the next n
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
k0 += 16;
sum0[n] += (int)r1[n] * k0[n];
k0 += 16;
sum0[n] += (int)r2[n] * k0[n];
k0 += 16;
sum0[n] += (int)r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += (int)r0[n] * k1[n];
k1 += 16;
sum1[n] += (int)r1[n] * k1[n];
k1 += 16;
sum1[n] += (int)r2[n] * k1[n];
k1 += 16;
sum1[n] += (int)r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += (int)r0[n] * k2[n];
k2 += 16;
sum2[n] += (int)r1[n] * k2[n];
k2 += 16;
sum2[n] += (int)r2[n] * k2[n];
k2 += 16;
sum2[n] += (int)r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += (int)r0[n] * k3[n];
k3 += 16;
sum3[n] += (int)r1[n] * k3[n];
k3 += 16;
sum3[n] += (int)r2[n] * k3[n];
k3 += 16;
sum3[n] += (int)r3[n] * k3[n];
k3 -= 16 * 3;
}
}
// leftover input channels
for (; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum1[n] += (int)r0[n] * k1[n];
sum2[n] += (int)r0[n] * k2[n];
sum3[n] += (int)r0[n] * k3[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
}
}
// remaining output channels, one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[16] = {0};
int q = 0;
for (; q+3<inch; q+=4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel0_tm.row<short>(q+1);
const short* k2 = kernel0_tm.row<short>(q+2);
const short* k3 = kernel0_tm.row<short>(q+3);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum0[n] += (int)r1[n] * k1[n];
sum0[n] += (int)r2[n] * k2[n];
sum0[n] += (int)r3[n] * k3[n];
}
}
for (; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j=0; j<nColBlocks; j++)
{
// each tile row produces two output rows
int* outRow0 = out.row<int>(j*2);
int* outRow1 = out.row<int>(j*2+1);
for(int i=0; i<nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j*nRowBlocks + i);
int s0[4],s1[4],s2[4],s3[4];
int w0[4],w1[4];
int d0[2],d1[2],d2[2],d3[2];
int o0[2],o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 4];
s2[n] = out_tile[n+ 8];
s3[n] = out_tile[n+12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0];
d1[0] = w0[1]; d1[1] = w1[1];
d2[0] = w0[2]; d2[1] = w1[2];
d3[0] = w0[3]; d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// >>2 undoes the x2 scaling applied twice by G' = 2*G in the
// kernel transform
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Pre-transform 3x3 int8 kernels for Winograd F(4,3): U = G * k * G^T.
// G is the float matrix {1/4, -1/6, ..., 1} scaled by 24 so the transform
// stays in integer arithmetic; the forward pass divides the output by 576
// (24^2) to compensate.
// Output layout: kernel_tm(p).row(q) holds the 6x6 (36 shorts) transformed
// kernel for output channel p, input channel q.
// NOTE(review): the second-stage products (up to |tmp| * 24) can exceed the
// short range for large int8 weights; presumably quantized weights keep the
// magnitudes small enough in practice — confirm upstream.
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(6*6, inch, outch, 2ul);

    // integer-scaled G for F(4,3) (float version times 24)
    const short ktm[6][3] = {
        { 6, 0, 0},
        { -4, -4, -4},
        { -4, 4, -4},
        { 1, 2, 4},
        { 1, -2, 4},
        { 0, 0, 24}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* k3x3 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* ktrans = kernel_tm.channel(p).row<short>(q);

            // the three rows of the 3x3 kernel
            const signed char* krow[3] = { k3x3, k3x3 + 3, k3x3 + 6 };

            // h = G * k : 6x3 intermediate, one column per kernel row
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                for (int r = 0; r < 3; r++)
                {
                    const signed char* k = krow[r];
                    tmp[i][r] = (short)(k[0] * ktm[i][0] + k[1] * ktm[i][1] + k[2] * ktm[i][2]);
                }
            }

            // U = h * G^T : complete the 6x6 transformed kernel
            for (int j = 0; j < 6; j++)
            {
                const short* t = tmp[j];
                for (int i = 0; i < 6; i++)
                {
                    ktrans[j*6 + i] = t[0] * ktm[i][0] + t[1] * ktm[i][1] + t[2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(4,3) 3x3 stride-1 convolution on int8 data.
// Stages: pad input to a multiple of the 4x4 output tile, transform each 6x6
// input tile (B^T * d * B), elementwise-multiply/accumulate against the
// pre-transformed kernels (conv3x3s1_winograd43_transform_kernel_int8_sse),
// transform back (A^T * w * A with /576 undoing the x24 scaling applied
// twice in the kernel transform), then crop the padding off the result.
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
// one 36-short (6x6) row per tile per input channel
bottom_blob_tm.create(6*6, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// six consecutive input rows feeding this row of tiles
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4*d0[n] - 5*d2[n] + d4[n];
w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n];
w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n];
w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n];
w5[n] = 4*d1[n] - 5*d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
}
// d = B_t * d_t (second application of B^T completes B^T*d*B)
for (int n = 0; n < 6; n++)
{
d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
}
// save to out_tm
for (int n = 0; n < 6; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 6] = d1[n];
out_tm0[n+12] = d2[n];
out_tm0[n+18] = d3[n];
out_tm0[n+24] = d4[n];
out_tm0[n+30] = d5[n];
}
// adjacent 6x6 input tiles overlap by 2 columns (output tile width 4)
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
out_tm0 += 36;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
// int accumulators: 36 values per tile per output channel (elemsize 4)
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[36] = {0};
// accumulate the per-element products over all input channels
for (int q=0; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n=0; n<36; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n=0; n<36; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j=0; j<nColBlocks; j++)
{
// each tile row produces four output rows
int* outRow0 = out.row<int>(j*4);
int* outRow1 = out.row<int>(j*4+1);
int* outRow2 = out.row<int>(j*4+2);
int* outRow3 = out.row<int>(j*4+3);
for(int i=0; i<nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j*nRowBlocks + i);
int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
int w0[6],w1[6],w2[6],w3[6];
int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
int o0[4],o1[4],o2[4],o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 6];
s2[n] = out_tile[n+12];
s3[n] = out_tile[n+18];
s4[n] = out_tile[n+24];
s5[n] = out_tile[n+30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
}
// /576 (= 24^2) undoes the x24 integer scaling applied twice in
// conv3x3s1_winograd43_transform_kernel_int8_sse
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Direct 3x3 stride-2 int8 convolution (no padding handled here).
// Accumulates int32 sums over all input channels into each output channel.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after consuming 2*outw columns of a row, skip ahead to the start of
    // the next row pair (stride 2 vertically)
    const int tailstep = w - 2 * outw + w;

    const signed char *kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (output, input) channel pair
        const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int *outptr0 = out0;

            const signed char *img0 = bottom_blob.channel(q);
            // the three input rows covered by the 3x3 window
            const signed char *rows[3] = { img0, img0 + w, img0 + w * 2 };

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    // dot the 3x3 window with the 9 kernel taps
                    int acc = 0;
                    for (int k = 0; k < 9; k++)
                    {
                        acc += (int)rows[k / 3][k % 3] * kernel0[k];
                    }
                    *outptr0 += acc;

                    rows[0] += 2;
                    rows[1] += 2;
                    rows[2] += 2;
                    outptr0++;
                }
                rows[0] += tailstep;
                rows[1] += tailstep;
                rows[2] += tailstep;
            }

            kernel0 += 9;
        }
    }
}
// 3x3 stride-1 int8 convolution with float dequantization, delegated to the
// im2col + int8 GEMM path.
// @param bottom_blob    int8 input blob
// @param top_blob       output blob
// @param _kernel        flattened int8 weights
// @param _bias          bias blob
// @param scales_dequant dequantization scales (presumably one per output
//                       channel — confirm against the GEMM callee)
// @param opt            threading/allocator options
// Fix: take the scales by const reference instead of by value — the vector
// was copied on every call for no benefit (the callee still receives its
// own copy exactly as before).
static void conv3x3s1_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, const std::vector<float> &scales_dequant, const Option& opt)
{
    const int kernel_w = 3;
    const int kernel_h = 3;
    const int stride_w = 1;
    const int stride_h = 1;
    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}
// 3x3 stride-2 int8 convolution with float dequantization, delegated to the
// im2col + int8 GEMM path.
// @param scales_dequant dequantization scales (presumably one per output
//                       channel — confirm against the GEMM callee)
// Fix: take the scales by const reference instead of by value — the vector
// was copied on every call for no benefit.
static void conv3x3s2_int8_dequant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, const std::vector<float> &scales_dequant, const Option& opt)
{
    const int kernel_w = 3;
    const int kernel_h = 3;
    const int stride_w = 2;
    const int stride_h = 2;
    conv_im2col_sgemm_int8_dequant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_dequant, opt);
}
// 3x3 stride-1 int8 convolution with requantization, delegated to the
// im2col + int8 GEMM path.
// @param scales_requant requantization scales (presumably per output
//                       channel — confirm against the GEMM callee)
// Fix: take the scales by const reference instead of by value — the vector
// was copied on every call for no benefit.
static void conv3x3s1_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, const std::vector<float> &scales_requant, const Option& opt)
{
    const int kernel_w = 3;
    const int kernel_h = 3;
    const int stride_w = 1;
    const int stride_h = 1;
    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
// 3x3 stride-2 int8 convolution with requantization, delegated to the
// im2col + int8 GEMM path.
// @param scales_requant requantization scales (presumably per output
//                       channel — confirm against the GEMM callee)
// Fix: take the scales by const reference instead of by value — the vector
// was copied on every call for no benefit.
static void conv3x3s2_int8_requant_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, const std::vector<float> &scales_requant, const Option& opt)
{
    const int kernel_w = 3;
    const int kernel_h = 3;
    const int stride_w = 2;
    const int stride_h = 2;
    conv_im2col_sgemm_int8_requant_sse(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, _bias, scales_requant, opt);
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII helper that tracks how many template-parameter levels were pushed
/// through it and pops exactly that many from the referenced depth counter
/// on destruction.
class TemplateParameterDepthRAII {
  unsigned &TrackedDepth;
  unsigned LevelsAdded = 0;

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth) : TrackedDepth(Depth) {}

  /// Restore the counter to its value at construction time.
  ~TemplateParameterDepthRAII() { TrackedDepth -= LevelsAdded; }

  /// Push a single level.
  void operator++() { addDepth(1); }

  /// Push \p D levels at once.
  void addDepth(unsigned D) {
    TrackedDepth += D;
    LevelsAdded += D;
  }

  /// Current value of the tracked depth counter.
  unsigned getDepth() const { return TrackedDepth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
// Trivial accessors forwarding to the preprocessor and Sema.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
/// The token currently being peeked at (not yet consumed).
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
/// Convenience overload for callers that do not need the parsed
/// declaration group; forwards to the out-parameter form above.
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
// Record the location BEFORE lexing: PP.Lex overwrites Tok.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current token only if it is of the expected kind.
/// Returns true (and advances) on a match, false otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
/// As above, but also reports the location of the consumed token in \p Loc.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
// Code-completion tokens are only consumed when the caller explicitly
// opts in; otherwise they are routed to the completion handler.
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
/// Location just past the end of the previously consumed token.
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
/// Such tokens must not go through plain ConsumeToken (see the assert there).
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
/// The push/lex/push sequence below is order-critical: it re-enqueues the
/// current token, makes \p Consumed current, then re-enqueues what was
/// current so it follows \p Consumed.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed);
PP.Lex(Tok);
PP.EnterToken(Next);
}
/// Consume an annotation token. Returns its start location; note that
/// PrevTokLocation is set to the annotation's END location, unlike the
/// other consume methods.
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount)
--ParenCount; // Don't let unbalanced )'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount)
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount)
--BraceCount; // Don't let unbalanced }'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
  bool isFoldOperator(tok::TokenKind Kind) const;

  /// Initialize all pragma handlers.
  void initializePragmaHandlers();

  /// Destroy and reset all pragma handlers.
  void resetPragmaHandlers();

  /// Handle the annotation token produced for #pragma unused(...)
  void HandlePragmaUnused();

  /// Handle the annotation token produced for
  /// #pragma GCC visibility...
  void HandlePragmaVisibility();

  /// Handle the annotation token produced for
  /// #pragma pack...
  void HandlePragmaPack();

  /// Handle the annotation token produced for
  /// #pragma ms_struct...
  void HandlePragmaMSStruct();

  /// Handle the annotation token produced for
  /// #pragma comment...
  void HandlePragmaMSComment();

  /// Handle the annotation token produced for
  /// #pragma pointers_to_members...
  void HandlePragmaMSPointersToMembers();

  /// Handle the annotation token produced for
  /// #pragma vtordisp...
  void HandlePragmaMSVtorDisp();

  /// Handle an annotation token produced for a Microsoft-specific pragma.
  void HandlePragmaMSPragma();
  bool HandlePragmaMSSection(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSSegment(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSInitSeg(StringRef PragmaName,
                             SourceLocation PragmaLocation);

  /// Handle the annotation token produced for
  /// #pragma align...
  void HandlePragmaAlign();

  /// Handle the annotation token produced for
  /// #pragma clang __debug dump...
  void HandlePragmaDump();

  /// Handle the annotation token produced for
  /// #pragma weak id...
  void HandlePragmaWeak();

  /// Handle the annotation token produced for
  /// #pragma weak id = id...
  void HandlePragmaWeakAlias();

  /// Handle the annotation token produced for
  /// #pragma redefine_extname...
  void HandlePragmaRedefineExtname();

  /// Handle the annotation token produced for
  /// #pragma STDC FP_CONTRACT...
  void HandlePragmaFPContract();

  /// Handle the annotation token produced for
  /// #pragma clang fp ...
  void HandlePragmaFP();

  /// Handle the annotation token produced for
  /// #pragma OPENCL EXTENSION...
  void HandlePragmaOpenCLExtension();

  /// Handle the annotation token produced for
  /// #pragma clang __debug captured
  StmtResult HandlePragmaCaptured();

  /// Handle the annotation token produced for
  /// #pragma clang loop and #pragma unroll.
  bool HandlePragmaLoopHint(LoopHint &Hint);

  /// Parse the subject-match-rule set used by #pragma clang attribute.
  bool ParsePragmaAttributeSubjectMatchRuleSet(
      attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
      SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);

  /// Handle the annotation token produced for #pragma clang attribute.
  void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
  /// NextToken - This peeks ahead one token and returns it without
  /// consuming it.
  const Token &NextToken() {
    const Token &Ahead = PP.LookAhead(0);
    return Ahead;
  }
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
  /// Stash a parsed type into an annotation token.
  static void setTypeAnnotation(Token &Tok, ParsedType T) {
    void *Opaque = T.getAsOpaquePtr();
    Tok.setAnnotationValue(Opaque);
  }
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
  // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
  // find a type name by attempting typo correction.
  bool TryAnnotateTypeOrScopeToken();
  bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                                 bool IsNewScope);
  bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
  /// The result of attempting to annotate a name as a type or expression.
  enum AnnotatedNameKind {
    /// Annotation has failed and emitted an error.
    ANK_Error,
    /// The identifier is a tentatively-declared name.
    ANK_TentativeDecl,
    /// The identifier is a template name. FIXME: Add an annotation for that.
    ANK_TemplateName,
    /// The identifier can't be resolved.
    ANK_Unresolved,
    /// Annotation was successful.
    ANK_Success
  };

  AnnotatedNameKind
  TryAnnotateName(bool IsAddressOfOperand,
                  std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  /// Push a tok::annot_cxxscope token onto the token stream.
  void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
  // Out-of-line workers for the AltiVec keyword checks above.
  bool TryAltiVecVectorTokenOutOfLine();
  bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                                const char *&PrevSpec, unsigned &DiagID,
                                bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
  /// TryKeywordIdentFallback - For compatibility with system headers using
  /// keywords as identifiers, attempt to convert the current token to an
  /// identifier and optionally disable the keyword for the remainder of the
  /// translation unit. This returns false if the token was not replaced,
  /// otherwise emits a diagnostic and returns true.
  bool TryKeywordIdentFallback(bool DisableKeyword);

  /// Get the TemplateIdAnnotation from the token.
  TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
  /// TentativeParsingAction - An object that is used as a kind of "tentative
  /// parsing transaction". It gets instantiated to mark the token position and
  /// after the token consumption is done, Commit() or Revert() is called to
  /// either "commit the consumed tokens" or revert to the previously marked
  /// token position. Example:
  ///
  ///   TentativeParsingAction TPA(*this);
  ///   ConsumeToken();
  ///   ....
  ///   TPA.Revert();
  ///
  class TentativeParsingAction {
    Parser &P;
    // Snapshot of parser state captured at construction; restored by Revert().
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    bool isActive;
  public:
    explicit TentativeParsingAction(Parser& p) : P(p) {
      // Capture everything Revert() needs: the current token, the size of
      // the tentative-identifier list, and the delimiter counts; then ask
      // the preprocessor to start caching tokens for backtracking.
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
          P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      // Rewind the token stream and restore the saved parser state.
      P.PP.Backtrack();
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };
  /// A TentativeParsingAction that automatically reverts in its destructor.
  /// Useful for disambiguation parses that will always be reverted.
  class RevertingTentativeParsingAction
      : private Parser::TentativeParsingAction {
  public:
    RevertingTentativeParsingAction(Parser &P)
        : Parser::TentativeParsingAction(P) {}
    ~RevertingTentativeParsingAction() { Revert(); }
  };

  class UnannotatedTentativeParsingAction;
  /// ObjCDeclContextSwitch - An object used to switch context from
  /// an objective-c decl context to its enclosing decl context and
  /// back.
  class ObjCDeclContextSwitch {
    Parser &P;
    Decl *DC;
    SaveAndRestore<bool> WithinObjCContainer;
  public:
    explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
      // Only leave/re-enter the container if we are actually inside one.
      if (DC)
        P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
    }
    ~ObjCDeclContextSwitch() {
      if (DC)
        P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
    }
  };
  /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
  /// input. If so, it is consumed and false is returned.
  ///
  /// If a trivial punctuator misspelling is encountered, a FixIt error
  /// diagnostic is issued and false is returned after recovery.
  ///
  /// If the input is malformed, this emits the specified diagnostic and true is
  /// returned.
  bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                        unsigned Diag = diag::err_expected,
                        StringRef DiagMsg = "");

  /// The parser expects a semicolon and, if present, will consume it.
  ///
  /// If the next token is not a semicolon, this emits the specified diagnostic,
  /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
  /// to the semicolon, consumes that extra token.
  bool ExpectAndConsumeSemi(unsigned DiagID);

  /// The kind of extra semi diagnostic to emit.
  enum ExtraSemiKind {
    OutsideFunction = 0,
    InsideStruct = 1,
    InstanceVariableList = 2,
    AfterMemberFunctionDefinition = 3
  };

  /// Consume any extra semi-colons until the end of the line.
  void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

  /// Return false if the next token is an identifier. An 'expected identifier'
  /// error is emitted otherwise.
  ///
  /// The parser tries to recover from the error by checking if the next token
  /// is a C++ keyword when parsing Objective-C++. Return false if the recovery
  /// was successful.
  bool expectIdentifier();
public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
      : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();

        // Null out Self to mark that no scope was entered; this makes
        // Exit() and the destructor no-ops.
        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() {
      Exit();
    }
  };
  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };
  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.

public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);

  /// Convenience overload: diagnose at the current token's location.
  DiagnosticBuilder Diag(unsigned DiagID) {
    return Diag(Tok, DiagID);
  }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
  /// Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  /// Combine flag values, so several SkipUntilFlags can be passed at once.
  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }
  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified). Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point. If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  // Out-of-line worker; the fixed-arity overloads above forward to this one.
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();
private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    // Subclasses override whichever hooks apply to the kind of late-parsed
    // entity they represent.
    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };
  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };
  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    // Record a declaration this late-parsed attribute applies to.
    void addDecl(Decl *D) { Decls.push_back(D); }
  };
  // A list of late-parsed attributes. Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };
  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration.
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };
  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(Decl *P,
                                       std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };
  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser* Self;

    /// Method - The method declaration.
    Decl *Method;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration.
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };
  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must to be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };
  /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
  /// C++ class, its method declarations that contain parts that won't be
  /// parsed until after the definition is completed (C++ [class.mem]p2),
  /// the method declarations and possibly attached inline definitions
  /// will be stored here with the tokens that will be parsed to create those
  /// entities.
  typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class had an associated template
    /// scope. When true, TagOrTemplate is a template declaration;
    /// otherwise, it is a tag declaration.
    bool TemplateScope : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };
  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  /// Return the innermost class currently being parsed. Requires a
  /// non-empty ClassStack.
  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }
  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class of the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      // Pop only if the caller did not already pop explicitly via Pop().
      if (!Popped)
        P.PopParsingClass(State);
    }
  };
  /// Contains information about any template-specific
  /// information that has been parsed prior to parsing declaration
  /// specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };
  void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
  void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

  // Static trampolines taking the Parser instance as an opaque void*.
  static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
  static void LateTemplateParserCleanupCallback(void *P);

  Sema::ParsingClassState
  PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
  void DeallocateParsedClasses(ParsingClass *Class);
  void PopParsingClass(Sema::ParsingClassState);

  /// The kind of cached initializer being late-parsed.
  enum CachedInitKind {
    CIK_DefaultArgument,
    CIK_DefaultInitializer
  };

  NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                     AttributeList *AccessAttrs,
                                     ParsingDeclarator &D,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     const VirtSpecifiers& VS,
                                     SourceLocation PureSpecLoc);
  void ParseCXXNonStaticMemberInitializer(Decl *VarD);
  void ParseLexedAttributes(ParsingClass &Class);
  void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                               bool EnterScope, bool OnDefinition);
  void ParseLexedAttribute(LateParsedAttribute &LA,
                           bool EnterScope, bool OnDefinition);
  void ParseLexedMethodDeclarations(ParsingClass &Class);
  void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
  void ParseLexedMethodDefs(ParsingClass &Class);
  void ParseLexedMethodDef(LexedMethod &LM);
  void ParseLexedMemberInitializers(ParsingClass &Class);
  void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
  void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
  bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
  bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
  bool ConsumeAndStoreConditional(CachedTokens &Toks);

  /// Convenience overload: cache tokens up to (and optionally including)
  /// a single terminator token kind.
  bool ConsumeAndStoreUntil(tok::TokenKind T1,
                            CachedTokens &Toks,
                            bool StopAtSemi = true,
                            bool ConsumeFinalToken = true) {
    return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
  }
  bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                            CachedTokens &Toks,
                            bool StopAtSemi = true,
                            bool ConsumeFinalToken = true);
  //===--------------------------------------------------------------------===//
  // C99 6.9: External Definitions.

  /// ParsedAttributes augmented with the source range the attributes cover.
  struct ParsedAttributesWithRange : ParsedAttributes {
    ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

    void clear() {
      ParsedAttributes::clear();
      Range = SourceRange();
    }

    SourceRange Range;
  };
  DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                          ParsingDeclSpec *DS = nullptr);
  bool isDeclarationAfterDeclarator();
  bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
  DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
      ParsedAttributesWithRange &attrs,
      ParsingDeclSpec *DS = nullptr,
      AccessSpecifier AS = AS_none);
  DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
                                                ParsingDeclSpec &DS,
                                                AccessSpecifier AS);

  void SkipFunctionBody();
  Decl *ParseFunctionDefinition(ParsingDeclarator &D,
                 const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                 LateParsedAttrList *LateParsedAttrs = nullptr);
  void ParseKNRParamDeclarations(Declarator &D);
  // EndLoc, if non-NULL, is filled with the location of the last token of
  // the simple-asm.
  ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
  ExprResult ParseAsmStringLiteral();
  // Objective-C External Declarations
  void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
  DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
  DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
  Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                        ParsedAttributes &prefixAttrs);

  class ObjCTypeParamListScope;
  ObjCTypeParamList *parseObjCTypeParamList();
  ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
      ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
      SmallVectorImpl<IdentifierLocPair> &protocolIdents,
      SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

  void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
                                        BalancedDelimiterTracker &T,
                                        SmallVectorImpl<Decl *> &AllIvarDecls,
                                        bool RBraceMissing);
  void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                       tok::ObjCKeywordKind visibility,
                                       SourceLocation atLoc);
  bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                   SmallVectorImpl<SourceLocation> &PLocs,
                                   bool WarnOnDeclarations,
                                   bool ForObjCContainer,
                                   SourceLocation &LAngleLoc,
                                   SourceLocation &EndProtoLoc,
                                   bool consumeLastToken);

  /// Parse the first angle-bracket-delimited clause for an
  /// Objective-C object or object pointer type, which may be either
  /// type arguments or protocol qualifiers.
  void parseObjCTypeArgsOrProtocolQualifiers(
         ParsedType baseType,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SmallVectorImpl<SourceLocation> &protocolLocs,
         SourceLocation &protocolRAngleLoc,
         bool consumeLastToken,
         bool warnOnIncompleteProtocols);

  /// Parse either Objective-C type arguments or protocol qualifiers; if the
  /// former, also parse protocol qualifiers afterward.
  void parseObjCTypeArgsAndProtocolQualifiers(
         ParsedType baseType,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SmallVectorImpl<SourceLocation> &protocolLocs,
         SourceLocation &protocolRAngleLoc,
         bool consumeLastToken);

  /// Parse a protocol qualifier type such as '<NSCopying>', which is
  /// an anachronistic way of writing 'id<NSCopying>'.
  TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);

  /// Parse Objective-C type arguments and protocol qualifiers, extending the
  /// current type with the parsed result.
  TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);
/// RAII object tracking the parse of an Objective-C @implementation.
/// On construction it installs itself as the parser's current
/// implementation context (P.CurParsedObjCImpl); finish()/the destructor
/// tear that state down (definitions elsewhere).
struct ObjCImplParsingDataRAII {
// The parser this context is registered with (set in the constructor).
Parser &P;
// The @implementation declaration being parsed.
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
// Method bodies stashed for late parsing; presumably replayed at @end —
// see StashAwayMethodOrFunctionBodyTokens (declared below).
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
// Whether finish() has already been called for this context.
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
/// The expression is not a type cast.
NotTypeCast = 0,
/// The expression may turn out to be a type cast.
MaybeTypeCast,
/// The expression is known to be a type cast.
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(
SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> Completer = llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///       assignment-expression
///       '{' ...
ExprResult ParseInitializer() {
  // Braced initializers get their own parse path; everything else is an
  // assignment-expression.
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
enum AllowedConstructsKind {
/// Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// Allow statements and all executable OpenMP directives
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior of this __if_exists or __if_not_exists block
/// should.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // Enumerate every context explicitly (no default:) so that adding a new
  // DeclSpecContext triggers a -Wswitch warning here.
  switch (DSC) {
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  // Cover every context explicitly (no default:) so new enumerators are
  // flagged by -Wswitch.
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
// Location of the ':' separating the declarator from the range
// expression; invalid when no ':' was parsed.
SourceLocation ColonLoc;
// The range expression following the ':'.
ExprResult RangeExpr;
// True iff a ':' was seen, i.e. this really is a for-range-declaration.
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  // C++ needs full tentative-parsing disambiguation; C can decide directly.
  return getLangOpts().CPlusPlus
             ? isCXXDeclarationSpecifier() == TPResult::True
             : isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // In C there is no type-id/expression ambiguity to report.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id is
/// parenthesized.
bool isTypeIdUnambiguously() {
  if (getLangOpts().CPlusPlus) {
    bool Ambiguous; // result discarded in this unambiguous context
    return isCXXTypeId(TypeIdUnambiguous, Ambiguous);
  }
  return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can begin a standard attribute; a lone '[' is fine here.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the tokens here look like a misplaced C++11 attribute ('[[') or
/// 'alignas', diagnose it against \p CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  const bool AtCXX11Attr =
      Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtCXX11Attr || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  // Diagnose and drop the attributes only when some were actually parsed.
  if (attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(attrs, FixItLoc);
    attrs.clear();
  }
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// Parse GNU __attribute__ specifiers, if present, and attach them to the
/// given declarator.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes gnuAttrs(AttrFactory);
  SourceLocation gnuAttrsEnd;
  ParseGNUAttributes(gnuAttrs, &gnuAttrsEnd, LateAttrs, &D);
  D.takeAttributes(gnuAttrs, gnuAttrsEnd);
}
/// Parse GNU __attribute__ specifiers, if present, into \p attrs.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// Parse a C++11 attribute-specifier-seq, if one begins here, and attach it
/// to the given declarator.
void MaybeParseCXX11Attributes(Declarator &D) {
  // Only recognized when the dialect allows standard attributes and the
  // token stream really begins an attribute-specifier.
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange cxx11Attrs(AttrFactory);
  SourceLocation cxx11AttrsEnd;
  ParseCXX11Attributes(cxx11Attrs, &cxx11AttrsEnd);
  D.takeAttributes(cxx11Attrs, cxx11AttrsEnd);
}
/// Parse a C++11 attribute-specifier-seq, if one begins here, into a plain
/// ParsedAttributes list.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  // Parse into a ranged list first, then transfer ownership of the parsed
  // attributes to the caller's un-ranged list.
  ParsedAttributesWithRange rangedAttrs(AttrFactory);
  ParseCXX11Attributes(rangedAttrs, endLoc);
  attrs.takeAllFrom(rangedAttrs);
}
/// Parse a C++11 attribute-specifier-seq, if one begins here, into a ranged
/// attribute list.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  // The caller may be in a position where '[[' could instead begin an
  // Objective-C message send; let the disambiguator take that into account.
  if (isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Parse Microsoft-style '[...]' attributes, if present.  Only recognized
/// under the Microsoft extensions language option.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt || Tok.isNot(tok::l_square))
    return;
  ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse __declspec attribute specifiers, if present.  Only recognized when
/// the declspec keyword is enabled in the language options.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword)
    return;
  if (Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  // Outside OpenCL there is nothing to parse, which counts as success.
  return getLangOpts().OpenCL ? ParseOpenCLUnrollHintAttribute(Attrs) : true;
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Return the virt-specifier denoted by the current token, if any, by
/// delegating to the Token-taking overload.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
// True once Sema has successfully been told we entered the declarator scope
// named by SS; only then must the Sema scope be exited in the destructor.
bool EnteredScope;
// True once a parser Scope has been pushed, even if the Sema entry failed.
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
// Push a new parser scope and ask Sema to enter the C++ declarator scope.
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// NOTE: a 'false' return from ActOnCXXEnterDeclaratorScope is treated as
// success here (the inverted convention of Sema's ActOn* entry points).
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
// Undo, in reverse order, whatever EnterDeclaratorScope accomplished.
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bitmask describing which attribute syntaxes a declarator position will
/// parse (and whether GNU attributes are parsed only to be rejected).
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< GNU attributes are accepted.
AR_CXX11AttributesParsed = 1 << 2, ///< C++11 attributes are accepted.
AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes are accepted.
AR_AllAttributesParsed = AR_GNUAttributesParsed | ///< Accept every syntax.
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed | ///< GNU + __declspec only.
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
std::vector<IdentifierInfo *> &Ident,
std::vector<SourceLocation> &NamespaceLoc,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed components of a C++ using-declarator: an optional 'typename'
/// keyword, the nested-name-specifier, the unqualified name, and an
/// optional pack-expansion ellipsis.
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
// Reset all components so the declarator can be reused for the next entity
// in a comma-separated using-declaration.
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
// NOTE(review): presumably the expression following ':' in clauses such as
// linear/aligned (step or alignment) — confirm against ParseOpenMPVarList.
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
// Scope qualifier and name of a user-defined reduction identifier.
CXXScopeSpec ReductionIdScopeSpec;
DeclarationNameInfo ReductionId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
// True when no map-type was written and it was defaulted.
bool IsMapTypeImplicit = false;
// Location of the depend/linear/map modifier, when present.
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis and skewness and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
%      size_t analyzeImage(Image **images,const int argc,
%        const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  analyzeImage() computes, for every image in the list, the mean, standard
  deviation, kurtosis and skewness of the pixel brightness and saturation
  (HSB space, scaled to the quantum range) and records each value as an
  image property such as "filter:brightness:mean".

    images:     address of a pointer to the first image in the list.
    argc, argv: command-line style filter arguments; unused by this filter.
    exception:  return any errors or warnings in this structure.

  Returns the filter signature.
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
  const char **argv,ExceptionInfo *exception)
{
  char
    text[MaxTextExtent];

  double
    area,
    brightness,
    brightness_mean,
    brightness_standard_deviation,
    brightness_kurtosis,
    brightness_skewness,
    brightness_sum_x,
    brightness_sum_x2,
    brightness_sum_x3,
    brightness_sum_x4,
    hue,
    saturation,
    saturation_mean,
    saturation_standard_deviation,
    saturation_kurtosis,
    saturation_skewness,
    saturation_sum_x,
    saturation_sum_x2,
    saturation_sum_x3,
    saturation_sum_x4;

  Image
    *image;

  assert(images != (Image **) NULL);
  assert(*images != (Image *) NULL);
  assert((*images)->signature == MagickSignature);
  (void) argc;
  (void) argv;
  image=(*images);
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
  {
    CacheView
      *image_view;

    ssize_t
      y;

    MagickBooleanType
      status;

    /*
      Reset the raw-moment accumulators for this image.
    */
    brightness_sum_x=0.0;
    brightness_sum_x2=0.0;
    brightness_sum_x3=0.0;
    brightness_sum_x4=0.0;
    brightness_mean=0.0;
    brightness_standard_deviation=0.0;
    brightness_kurtosis=0.0;
    brightness_skewness=0.0;
    saturation_sum_x=0.0;
    saturation_sum_x2=0.0;
    saturation_sum_x3=0.0;
    saturation_sum_x4=0.0;
    saturation_mean=0.0;
    saturation_standard_deviation=0.0;
    saturation_kurtosis=0.0;
    saturation_skewness=0.0;
    area=0.0;
    status=MagickTrue;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    /*
      The per-pixel temporaries must be thread-private and the raw-moment
      accumulators combined with a reduction; without these clauses the
      concurrently-processed rows race on the shared doubles and silently
      corrupt the statistics.
    */
    #pragma omp parallel for schedule(static,4) shared(status) \
      private(hue,saturation,brightness) \
      reduction(+:area,brightness_sum_x,brightness_sum_x2, \
        brightness_sum_x3,brightness_sum_x4,saturation_sum_x, \
        saturation_sum_x2,saturation_sum_x3,saturation_sum_x4) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const PixelPacket
        *p;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        /*
          Accumulate the first four raw moments of brightness and saturation,
          each scaled to the quantum range.
        */
        ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p),
          &hue,&saturation,&brightness);
        brightness*=QuantumRange;
        brightness_sum_x+=brightness;
        brightness_sum_x2+=brightness*brightness;
        brightness_sum_x3+=brightness*brightness*brightness;
        brightness_sum_x4+=brightness*brightness*brightness*brightness;
        saturation*=QuantumRange;
        saturation_sum_x+=saturation;
        saturation_sum_x2+=saturation*saturation;
        saturation_sum_x3+=saturation*saturation*saturation;
        saturation_sum_x4+=saturation*saturation*saturation*saturation;
        area++;
        p++;
      }
    }
    image_view=DestroyCacheView(image_view);
    if (area <= 0.0)
      break;  /* NOTE(review): a zero-pixel image aborts the remaining images
                 in the list too; confirm 'continue' is not intended here. */
    /*
      Convert raw moments into mean, standard deviation, excess kurtosis and
      skewness, and record each as an image property.
    */
    brightness_mean=brightness_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
    (void) SetImageProperty(image,"filter:brightness:mean",text);
    brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
      area*brightness_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      brightness_standard_deviation);
    (void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
    if (brightness_standard_deviation != 0)
      brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
        brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
        brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
        brightness_mean*brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation*
        brightness_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
    (void) SetImageProperty(image,"filter:brightness:kurtosis",text);
    if (brightness_standard_deviation != 0)
      brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
        brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
        brightness_mean)/(brightness_standard_deviation*
        brightness_standard_deviation*brightness_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
    (void) SetImageProperty(image,"filter:brightness:skewness",text);
    saturation_mean=saturation_sum_x/area;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
    (void) SetImageProperty(image,"filter:saturation:mean",text);
    saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
      area*saturation_sum_x/area));
    (void) FormatLocaleString(text,MaxTextExtent,"%g",
      saturation_standard_deviation);
    (void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
    if (saturation_standard_deviation != 0)
      saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
        saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
        saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
        saturation_mean*saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation*
        saturation_standard_deviation)-3.0;
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
    (void) SetImageProperty(image,"filter:saturation:kurtosis",text);
    if (saturation_standard_deviation != 0)
      saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
        saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
        saturation_mean)/(saturation_standard_deviation*
        saturation_standard_deviation*saturation_standard_deviation);
    (void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
    (void) SetImageProperty(image,"filter:saturation:skewness",text);
  }
  return(MagickImageFilterSignature);
}
|
sstruct_sharedDOFComm.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.14 $
***********************************************************************EHEADER*/
/******************************************************************************
* OpenMP Problems
*
* Need to fix the way these variables are set and incremented in loops:
* tot_nsendRowsNcols, send_ColsData_alloc, tot_sendColsData
*
******************************************************************************/
#include "_hypre_sstruct_ls.h"
/*--------------------------------------------------------------------------
* hypre_MaxwellOffProcRowCreate
*--------------------------------------------------------------------------*/
hypre_MaxwellOffProcRow *
hypre_MaxwellOffProcRowCreate(HYPRE_Int ncols)
{
   /* Allocate the zero-initialized row container, then the (uninitialized)
      column-index and value arrays of length ncols. */
   hypre_MaxwellOffProcRow *row = hypre_CTAlloc(hypre_MaxwellOffProcRow, 1);

   (row -> ncols) = ncols;
   (row -> cols)  = hypre_TAlloc(HYPRE_Int, ncols);
   (row -> data)  = hypre_TAlloc(double, ncols);

   return row;
}
/*--------------------------------------------------------------------------
* hypre_MaxwellOffProcRowDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_MaxwellOffProcRowDestroy(void *OffProcRow_vdata)
{
hypre_MaxwellOffProcRow *OffProcRow= OffProcRow_vdata;
HYPRE_Int ierr= 0;
if (OffProcRow)
{
hypre_TFree(OffProcRow -> cols);
hypre_TFree(OffProcRow -> data);
}
hypre_TFree(OffProcRow);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SStructSharedDOF_ParcsrMatRowsComm
* Given a sstruct_grid & parcsr matrix with rows corresponding to the
* sstruct_grid, determine and extract the rows that must be communicated.
* These rows are for shared dof that geometrically lie on processor
* boundaries but internally are stored on one processor.
* Algo:
* for each cellbox
* RECVs:
* i) stretch the cellbox to the variable box
* ii) in the appropriate (dof-dependent) direction, take the
* boundary and boxman_intersect to extract boxmanentries
* that contain these boundary edges.
* iii)loop over the boxmanentries and see if they belong
* on this proc or another proc
* a) if belong on another proc, these are the recvs:
* count and prepare the communication buffers and
* values.
*
* SENDs:
* i) form layer of cells that is one layer off cellbox
* (stretches in the appropriate direction)
* ii) boxman_intersect with the cellgrid boxman
* iii)loop over the boxmanentries and see if they belong
* on this proc or another proc
* a) if belong on another proc, these are the sends:
* count and prepare the communication buffers and
* values.
*
* Note: For the recv data, the dof can come from only one processor.
* For the send data, the dof can go to more than one processor
* (the same dof is on the boundary of several cells).
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructSharedDOF_ParcsrMatRowsComm( hypre_SStructGrid *grid,
hypre_ParCSRMatrix *A,
HYPRE_Int *num_offprocrows_ptr,
hypre_MaxwellOffProcRow ***OffProcRows_ptr)
{
MPI_Comm A_comm= hypre_ParCSRMatrixComm(A);
MPI_Comm grid_comm= hypre_SStructGridComm(grid);
HYPRE_Int matrix_type= HYPRE_PARCSR;
HYPRE_Int nparts= hypre_SStructGridNParts(grid);
HYPRE_Int ndim = hypre_SStructGridNDim(grid);
hypre_SStructGrid *cell_ssgrid;
hypre_SStructPGrid *pgrid;
hypre_StructGrid *cellgrid;
hypre_BoxArray *cellboxes;
hypre_Box *box, *cellbox, vbox, boxman_entry_box;
hypre_Index loop_size, start, lindex;
HYPRE_Int start_rank, end_rank, rank;
HYPRE_Int i, j, k, m, n, t, part, var, nvars;
HYPRE_SStructVariable *vartypes;
HYPRE_Int nbdry_slabs;
hypre_BoxArray *recv_slabs, *send_slabs;
hypre_Index varoffset;
hypre_BoxManager **boxmans, *cell_boxman;
hypre_BoxManEntry **boxman_entries, *entry;
HYPRE_Int nboxman_entries;
hypre_Index ishift, jshift, kshift, zero_index;
hypre_Index ilower, iupper, index;
HYPRE_Int proc, nprocs, myproc;
HYPRE_Int *SendToProcs, *RecvFromProcs;
HYPRE_Int **send_RowsNcols; /* buffer for rows & ncols */
HYPRE_Int *send_RowsNcols_alloc;
HYPRE_Int *send_ColsData_alloc;
HYPRE_Int *tot_nsendRowsNcols, *tot_sendColsData;
double **vals; /* buffer for cols & data */
HYPRE_Int *col_inds;
double *values;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
HYPRE_Int **rbuffer_RowsNcols;
double **rbuffer_ColsData;
HYPRE_Int num_sends, num_recvs;
hypre_MaxwellOffProcRow **OffProcRows;
HYPRE_Int *starts;
HYPRE_Int ierr= 0;
/* NOTE(review): myproc is taken from A's communicator while nprocs and all
   message traffic below use grid_comm -- this assumes both communicators
   rank the processes identically; confirm against the callers. */
hypre_MPI_Comm_rank(A_comm, &myproc);
hypre_MPI_Comm_size(grid_comm, &nprocs);
/* local row ownership range of A, used to filter off-proc ranks below */
start_rank= hypre_ParCSRMatrixFirstRowIndex(A);
end_rank = hypre_ParCSRMatrixLastRowIndex(A);
hypre_SetIndex(ishift, 1, 0, 0);
hypre_SetIndex(jshift, 0, 1, 0);
hypre_SetIndex(kshift, 0, 0, 1);
hypre_SetIndex(zero_index, 0, 0, 0);
/* need a cellgrid boxman to determine the send boxes -> only the cell dofs
are unique so a boxman intersect can be used to get the edges that
must be sent. */
HYPRE_SStructGridCreate(grid_comm, ndim, nparts, &cell_ssgrid);
vartypes= hypre_CTAlloc(HYPRE_SStructVariable, 1);
vartypes[0]= HYPRE_SSTRUCT_VARIABLE_CELL;
for (i= 0; i< nparts; i++)
{
pgrid= hypre_SStructGridPGrid(grid, i);
cellgrid= hypre_SStructPGridCellSGrid(pgrid);
cellboxes= hypre_StructGridBoxes(cellgrid);
hypre_ForBoxI(j, cellboxes)
{
box= hypre_BoxArrayBox(cellboxes, j);
HYPRE_SStructGridSetExtents(cell_ssgrid, i,
hypre_BoxIMin(box), hypre_BoxIMax(box));
}
HYPRE_SStructGridSetVariables(cell_ssgrid, i, 1, vartypes);
}
HYPRE_SStructGridAssemble(cell_ssgrid);
hypre_TFree(vartypes);
/* box algebra to determine communication */
SendToProcs = hypre_CTAlloc(HYPRE_Int, nprocs);
RecvFromProcs = hypre_CTAlloc(HYPRE_Int, nprocs);
send_RowsNcols = hypre_TAlloc(HYPRE_Int *, nprocs);
send_RowsNcols_alloc= hypre_TAlloc(HYPRE_Int , nprocs);
send_ColsData_alloc = hypre_TAlloc(HYPRE_Int , nprocs);
vals = hypre_TAlloc(double *, nprocs);
tot_nsendRowsNcols = hypre_CTAlloc(HYPRE_Int, nprocs);
tot_sendColsData = hypre_CTAlloc(HYPRE_Int, nprocs);
/* per-destination send buffers: initial sizes, grown on demand below */
for (i= 0; i< nprocs; i++)
{
send_RowsNcols[i]= hypre_TAlloc(HYPRE_Int, 1000); /* initial allocation */
send_RowsNcols_alloc[i]= 1000;
vals[i]= hypre_TAlloc(double, 2000); /* initial allocation */
send_ColsData_alloc[i]= 2000;
}
/* counting & packing phase: for every variable box of every part,
   intersect the boundary slabs with the variable boxmans (recv side)
   and the one-cell-off slabs with the cell boxman (send side) */
for (part= 0; part< nparts; part++)
{
pgrid= hypre_SStructGridPGrid(grid, part);
nvars= hypre_SStructPGridNVars(pgrid);
vartypes= hypre_SStructPGridVarTypes(pgrid);
cellgrid = hypre_SStructPGridCellSGrid(pgrid);
cellboxes= hypre_StructGridBoxes(cellgrid);
boxmans= hypre_TAlloc(hypre_BoxManager *, nvars);
for (t= 0; t< nvars; t++)
{
boxmans[t]= hypre_SStructGridBoxManager(grid, part, t);
}
cell_boxman= hypre_SStructGridBoxManager(cell_ssgrid, part, 0);
hypre_ForBoxI(j, cellboxes)
{
cellbox= hypre_BoxArrayBox(cellboxes, j);
for (t= 0; t< nvars; t++)
{
var= vartypes[t];
hypre_SStructVariableGetOffset((hypre_SStructVariable) var,
ndim, varoffset);
/* form the variable cellbox */
hypre_CopyBox(cellbox, &vbox);
hypre_SubtractIndex(hypre_BoxIMin(&vbox), varoffset,
hypre_BoxIMin(&vbox));
/* boundary layer box depends on variable type */
switch(var)
{
case 1: /* node based */
{
nbdry_slabs= 6;
recv_slabs = hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- i,j,k directions */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
/* need to contract the slab in the i direction to avoid repeated
counting of some nodes. */
box= hypre_BoxArrayBox(recv_slabs, 2);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
box= hypre_BoxArrayBox(recv_slabs, 3);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
/* need to contract the slab in the i & j directions to avoid repeated
counting of some nodes. */
box= hypre_BoxArrayBox(recv_slabs, 4);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
hypre_BoxIMin(box)[1]++; /* contract */
hypre_BoxIMax(box)[1]--; /* contract */
box= hypre_BoxArrayBox(recv_slabs, 5);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
hypre_BoxIMin(box)[1]++; /* contract */
hypre_BoxIMax(box)[1]--; /* contract */
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[0]++;
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
hypre_BoxIMax(box)[1]++; /* stretch one layer +/- j*/
hypre_BoxIMin(box)[1]--;
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[0]--;
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
hypre_BoxIMax(box)[1]++; /* stretch one layer +/- j*/
hypre_BoxIMin(box)[1]--;
box= hypre_BoxArrayBox(send_slabs, 2);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[1]++;
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
box= hypre_BoxArrayBox(send_slabs, 3);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[1]--;
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
box= hypre_BoxArrayBox(send_slabs, 4);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[2]++;
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
box= hypre_BoxArrayBox(send_slabs, 5);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[2]--;
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
break;
}
case 2: /* x-face based */
{
nbdry_slabs= 2;
recv_slabs= hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- i direction */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[0]++;
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[0]--;
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
break;
}
case 3: /* y-face based */
{
nbdry_slabs= 2;
recv_slabs= hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- j direction */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[1]++;
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[1]--;
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
break;
}
case 4: /* z-face based */
{
nbdry_slabs= 2;
recv_slabs= hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- k direction */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[2]++;
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[2]--;
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
break;
}
case 5: /* x-edge based */
{
nbdry_slabs= 4;
recv_slabs= hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- j & k direction */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
/* need to contract the slab in the j direction to avoid repeated
counting of some x-edges. */
box= hypre_BoxArrayBox(recv_slabs, 2);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
hypre_BoxIMin(box)[1]++; /* contract */
hypre_BoxIMax(box)[1]--; /* contract */
box= hypre_BoxArrayBox(recv_slabs, 3);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
hypre_BoxIMin(box)[1]++; /* contract */
hypre_BoxIMax(box)[1]--; /* contract */
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[1]++;
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[1]--;
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
box= hypre_BoxArrayBox(send_slabs, 2);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[2]++;
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
box= hypre_BoxArrayBox(send_slabs, 3);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[2]--;
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
break;
}
case 6: /* y-edge based */
{
nbdry_slabs= 4;
recv_slabs= hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- i & k direction */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
/* need to contract the slab in the i direction to avoid repeated
counting of some y-edges. */
box= hypre_BoxArrayBox(recv_slabs, 2);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
box= hypre_BoxArrayBox(recv_slabs, 3);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[0]++;
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[0]--;
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
hypre_BoxIMax(box)[2]++; /* stretch one layer +/- k*/
hypre_BoxIMin(box)[2]--;
box= hypre_BoxArrayBox(send_slabs, 2);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[2]++;
hypre_BoxIMin(box)[2]= hypre_BoxIMax(box)[2];
box= hypre_BoxArrayBox(send_slabs, 3);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[2]--;
hypre_BoxIMax(box)[2]= hypre_BoxIMin(box)[2];
break;
}
case 7: /* z-edge based */
{
nbdry_slabs= 4;
recv_slabs= hypre_BoxArrayCreate(nbdry_slabs);
/* slab in the +/- i & j direction */
box= hypre_BoxArrayBox(recv_slabs, 0);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
box= hypre_BoxArrayBox(recv_slabs, 1);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
/* need to contract the slab in the i direction to avoid repeated
counting of some z-edges. */
box= hypre_BoxArrayBox(recv_slabs, 2);
hypre_CopyBox(&vbox, box);
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
box= hypre_BoxArrayBox(recv_slabs, 3);
hypre_CopyBox(&vbox, box);
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
hypre_BoxIMin(box)[0]++; /* contract */
hypre_BoxIMax(box)[0]--; /* contract */
/* send boxes are cell-based stretching out of cellbox - i.e., cells
that have these edges as boundary */
send_slabs= hypre_BoxArrayCreate(nbdry_slabs);
box= hypre_BoxArrayBox(send_slabs, 0);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[1]++;
hypre_BoxIMin(box)[1]= hypre_BoxIMax(box)[1];
hypre_BoxIMax(box)[0]++; /* stretch one layer +/- i*/
hypre_BoxIMin(box)[0]--;
box= hypre_BoxArrayBox(send_slabs, 1);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[1]--;
hypre_BoxIMax(box)[1]= hypre_BoxIMin(box)[1];
hypre_BoxIMax(box)[0]++; /* stretch one layer +/- i*/
hypre_BoxIMin(box)[0]--;
box= hypre_BoxArrayBox(send_slabs, 2);
hypre_CopyBox(cellbox, box);
hypre_BoxIMax(box)[0]++;
hypre_BoxIMin(box)[0]= hypre_BoxIMax(box)[0];
box= hypre_BoxArrayBox(send_slabs, 3);
hypre_CopyBox(cellbox, box);
hypre_BoxIMin(box)[0]--;
hypre_BoxIMax(box)[0]= hypre_BoxIMin(box)[0];
break;
}
} /* switch(var) */
/* determine no. of recv rows */
for (i= 0; i< nbdry_slabs; i++)
{
box= hypre_BoxArrayBox(recv_slabs, i);
hypre_BoxManIntersect(boxmans[t], hypre_BoxIMin(box), hypre_BoxIMax(box),
&boxman_entries, &nboxman_entries);
for (m= 0; m< nboxman_entries; m++)
{
hypre_SStructBoxManEntryGetProcess(boxman_entries[m], &proc);
if (proc != myproc)
{
hypre_BoxManEntryGetExtents(boxman_entries[m], ilower, iupper);
hypre_BoxSetExtents(&boxman_entry_box, ilower, iupper);
hypre_IntersectBoxes(&boxman_entry_box, box, &boxman_entry_box);
RecvFromProcs[proc]+= hypre_BoxVolume(&boxman_entry_box);
}
}
hypre_TFree(boxman_entries);
/* determine send rows. Note the cell_boxman */
box= hypre_BoxArrayBox(send_slabs, i);
hypre_BoxManIntersect(cell_boxman, hypre_BoxIMin(box), hypre_BoxIMax(box),
&boxman_entries, &nboxman_entries);
for (m= 0; m< nboxman_entries; m++)
{
hypre_SStructBoxManEntryGetProcess(boxman_entries[m], &proc);
if (proc != myproc)
{
hypre_BoxManEntryGetExtents(boxman_entries[m], ilower, iupper);
hypre_BoxSetExtents(&boxman_entry_box, ilower, iupper);
hypre_IntersectBoxes(&boxman_entry_box, box, &boxman_entry_box);
/* not correct box piece right now. Need to determine
the correct var box - extend to var_box and then intersect
with vbox */
hypre_SubtractIndex(hypre_BoxIMin(&boxman_entry_box), varoffset,
hypre_BoxIMin(&boxman_entry_box));
hypre_IntersectBoxes(&boxman_entry_box, &vbox, &boxman_entry_box);
/* factor 2: each row contributes a (rank, ncols) pair */
SendToProcs[proc]+= 2*hypre_BoxVolume(&boxman_entry_box);
/* check to see if sufficient memory allocation for send_rows */
if (SendToProcs[proc] > send_RowsNcols_alloc[proc])
{
send_RowsNcols_alloc[proc]= SendToProcs[proc];
send_RowsNcols[proc]=
hypre_TReAlloc(send_RowsNcols[proc], HYPRE_Int,
send_RowsNcols_alloc[proc]);
}
hypre_BoxGetSize(&boxman_entry_box, loop_size);
hypre_CopyIndex(hypre_BoxIMin(&boxman_entry_box), start);
hypre_BoxLoop0Begin(ndim, loop_size);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,lindex,index,entry,rank,tot_nsendRowsNcols,n,col_inds,values,send_ColsData_alloc,k,tot_sendColsData) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop0For()
{
hypre_BoxLoopGetIndex(lindex);
hypre_SetIndex(index, lindex[0], lindex[1], lindex[2]);
hypre_AddIndex(index, start, index);
hypre_SStructGridFindBoxManEntry(grid, part, index, t,
&entry);
if (entry)
{
hypre_SStructBoxManEntryGetGlobalRank(entry, index,
&rank, matrix_type);
/* index may still be off myproc because vbox was formed
by expanding the cellbox to the variable box without
checking (difficult) the whole expanded box is on myproc */
if (rank <= end_rank && rank >= start_rank)
{
/* pack the (row rank, ncols) pair ... */
send_RowsNcols[proc][tot_nsendRowsNcols[proc]]= rank;
tot_nsendRowsNcols[proc]++;
HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) A, rank, &n,
&col_inds, &values);
send_RowsNcols[proc][tot_nsendRowsNcols[proc]]= n;
tot_nsendRowsNcols[proc]++;
/* check if sufficient memory allocation in the data arrays */
if ( (tot_sendColsData[proc]+2*n) > send_ColsData_alloc[proc] )
{
send_ColsData_alloc[proc]+= 2000;
vals[proc]= hypre_TReAlloc(vals[proc], double,
send_ColsData_alloc[proc]);
}
/* ... and the interleaved (col index, value) pairs as doubles */
for (k= 0; k< n; k++)
{
vals[proc][tot_sendColsData[proc]]= (double) col_inds[k];
tot_sendColsData[proc]++;
vals[proc][tot_sendColsData[proc]]= values[k];
tot_sendColsData[proc]++;
}
HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) A, rank, &n,
&col_inds, &values);
} /* if (rank <= end_rank && rank >= start_rank) */
} /* if (entry) */
}
hypre_BoxLoop0End();
} /* if (proc != myproc) */
} /* for (m= 0; m< nboxman_entries; m++) */
hypre_TFree(boxman_entries);
} /* for (i= 0; i< nbdry_slabs; i++) */
hypre_BoxArrayDestroy(send_slabs);
hypre_BoxArrayDestroy(recv_slabs);
} /* for (t= 0; t< nvars; t++) */
} /* hypre_ForBoxI(j, cellboxes) */
hypre_TFree(boxmans);
} /* for (part= 0; part< nparts; part++) */
HYPRE_SStructGridDestroy(cell_ssgrid);
/* tally message counts; prefix-sum the recv row counts into starts[]
   so OffProcRows can be indexed per source processor */
num_sends= 0;
num_recvs= 0;
k= 0;
starts= hypre_CTAlloc(HYPRE_Int, nprocs+1);
for (i= 0; i< nprocs; i++)
{
starts[i+1]= starts[i]+RecvFromProcs[i];
if (RecvFromProcs[i])
{
num_recvs++;
k+= RecvFromProcs[i];
}
if (tot_sendColsData[i])
{
num_sends++;
}
}
OffProcRows= hypre_TAlloc(hypre_MaxwellOffProcRow *, k);
*num_offprocrows_ptr= k;
requests= hypre_CTAlloc(hypre_MPI_Request, num_sends+num_recvs);
status = hypre_CTAlloc(hypre_MPI_Status, num_sends+num_recvs);
/* send row size data */
j= 0;
rbuffer_RowsNcols= hypre_TAlloc(HYPRE_Int *, nprocs);
rbuffer_ColsData = hypre_TAlloc(double *, nprocs);
for (proc= 0; proc< nprocs; proc++)
{
if (RecvFromProcs[proc])
{
rbuffer_RowsNcols[proc]= hypre_TAlloc(HYPRE_Int, 2*RecvFromProcs[proc]);
hypre_MPI_Irecv(rbuffer_RowsNcols[proc], 2*RecvFromProcs[proc], HYPRE_MPI_INT,
proc, 0, grid_comm, &requests[j++]);
} /* if (RecvFromProcs[proc]) */
} /* for (proc= 0; proc< nprocs; proc++) */
for (proc= 0; proc< nprocs; proc++)
{
if (tot_nsendRowsNcols[proc])
{
hypre_MPI_Isend(send_RowsNcols[proc], tot_nsendRowsNcols[proc], HYPRE_MPI_INT, proc,
0, grid_comm, &requests[j++]);
}
}
hypre_MPI_Waitall(j, requests, status);
/* unpack data */
for (proc= 0; proc< nprocs; proc++)
{
/* send_RowsNcols_alloc is reused here to accumulate the total number
   of cols received from proc; it sizes rbuffer_ColsData below */
send_RowsNcols_alloc[proc]= 0;
if (RecvFromProcs[proc])
{
m= 0; ;
for (i= 0; i< RecvFromProcs[proc]; i++)
{
/* rbuffer_RowsNcols[m] has the row & rbuffer_RowsNcols[m+1] the col size */
OffProcRows[starts[proc]+i]= hypre_MaxwellOffProcRowCreate(rbuffer_RowsNcols[proc][m+1]);
(OffProcRows[starts[proc]+i] -> row) = rbuffer_RowsNcols[proc][m];
(OffProcRows[starts[proc]+i] -> ncols)= rbuffer_RowsNcols[proc][m+1];
send_RowsNcols_alloc[proc]+= rbuffer_RowsNcols[proc][m+1];
m+= 2;
}
rbuffer_ColsData[proc]= hypre_TAlloc(double, 2*send_RowsNcols_alloc[proc]);
hypre_TFree(rbuffer_RowsNcols[proc]);
}
}
hypre_TFree(rbuffer_RowsNcols);
hypre_TFree(requests);
hypre_TFree(status);
requests= hypre_CTAlloc(hypre_MPI_Request, num_sends+num_recvs);
status = hypre_CTAlloc(hypre_MPI_Status, num_sends+num_recvs);
/* send row data */
j= 0;
for (proc= 0; proc< nprocs; proc++)
{
if (RecvFromProcs[proc])
{
hypre_MPI_Irecv(rbuffer_ColsData[proc], 2*send_RowsNcols_alloc[proc], hypre_MPI_DOUBLE,
proc, 1, grid_comm, &requests[j++]);
} /* if (RecvFromProcs[proc]) */
} /* for (proc= 0; proc< nprocs; proc++) */
for (proc= 0; proc< nprocs; proc++)
{
if (tot_sendColsData[proc])
{
hypre_MPI_Isend(vals[proc], tot_sendColsData[proc], hypre_MPI_DOUBLE, proc,
1, grid_comm, &requests[j++]);
}
}
hypre_MPI_Waitall(j, requests, status);
/* unpack data: split the interleaved (col, value) doubles back into
   each OffProcRow's integer cols and double data arrays */
for (proc= 0; proc< nprocs; proc++)
{
if (RecvFromProcs[proc])
{
k= 0;
for (i= 0; i< RecvFromProcs[proc]; i++)
{
col_inds= (OffProcRows[starts[proc]+i] -> cols);
values = (OffProcRows[starts[proc]+i] -> data);
m = (OffProcRows[starts[proc]+i] -> ncols);
for (t= 0; t< m; t++)
{
col_inds[t]= (HYPRE_Int) rbuffer_ColsData[proc][k++];
values[t] = rbuffer_ColsData[proc][k++];
}
}
hypre_TFree(rbuffer_ColsData[proc]);
} /* if (RecvFromProcs[proc]) */
} /* for (proc= 0; proc< nprocs; proc++) */
hypre_TFree(rbuffer_ColsData);
hypre_TFree(requests);
hypre_TFree(status);
/* release all per-processor send buffers and bookkeeping arrays */
for (proc= 0; proc< nprocs; proc++)
{
hypre_TFree(send_RowsNcols[proc]);
hypre_TFree(vals[proc]);
}
hypre_TFree(send_RowsNcols);
hypre_TFree(vals);
hypre_TFree(tot_sendColsData);
hypre_TFree(tot_nsendRowsNcols);
hypre_TFree(send_ColsData_alloc);
hypre_TFree(send_RowsNcols_alloc);
hypre_TFree(SendToProcs);
hypre_TFree(RecvFromProcs);
hypre_TFree(starts);
*OffProcRows_ptr= OffProcRows;
return ierr;
}
|
lu.c |
/*
 * NAS Parallel Benchmarks "LU" kernel (C/OpenMP translation):
 * preprocessor-expanded system typedefs, libc prototypes, and the
 * benchmark's global problem data.  The bracketed markers such as
 * "[]" and "[1]" appear to be phase annotations inserted by an
 * instrumentation/analysis tool -- left untouched; confirm with the
 * tool that generated this file.
 */
/*[]*/
typedef long long __int64_t;
/*[]*/
typedef __int64_t __darwin_off_t;
/*[]*/
typedef __darwin_off_t fpos_t;
/* Expanded stdio internals (Darwin libc, per the __darwin_* names). */
/*[]*/
struct __sbuf {
unsigned char *_base;
int _size;
} ;
/*[]*/
struct __sFILEX ;
/*[]*/
struct __sFILE {
unsigned char *_p;
int _r;
int _w;
short _flags;
short _file;
struct __sbuf _bf;
int _lbfsize;
void *_cookie;
int ( *_close )(void *);
int ( *_read )(void *, char * , int );
fpos_t ( *_seek )(void *, fpos_t , int );
int ( *_write )(void *, const char * , int );
struct __sbuf _ub;
struct __sFILEX *_extra;
int _ur;
unsigned char _ubuf[3];
unsigned char _nbuf[1];
struct __sbuf _lb;
int _blksize;
fpos_t _offset;
} ;
/*[]*/
typedef struct __sFILE FILE;
/* libc / libm / OpenMP runtime prototypes used by the benchmark. */
/*[]*/
int fclose(FILE *);
/*[]*/
int fgetc(FILE *);
/*[]*/
FILE *fopen(const char *restrict __filename, const char *restrict __mode);
/*[]*/
int fscanf(FILE *restrict , const char *restrict , ...);
/*[]*/
int printf(const char *restrict , ...);
/*[]*/
void exit(int );
/*[]*/
extern double fabs(double );
/*[]*/
extern double sqrt(double );
/*[]*/
extern int omp_get_num_threads(void );
/*[]*/
typedef int boolean;
/* NPB common timing/reporting helpers (defined elsewhere). */
/*[]*/
extern void timer_clear(int );
/*[]*/
extern void timer_start(int );
/*[]*/
extern void timer_stop(int );
/*[]*/
extern double timer_read(int );
/*[]*/
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
/* Grid dimensions and loop bounds (read from input / set by domain()). */
/*[]*/
static int nx;
/*[]*/
static int ny;
/*[]*/
static int nz;
/*[]*/
static int nx0;
/*[]*/
static int ny0;
/*[]*/
static int nz0;
/*[]*/
static int ist;
/*[]*/
static int iend;
/*[]*/
static int jst;
/*[]*/
static int jend;
/*[]*/
static int ii1;
/*[]*/
static int ii2;
/*[]*/
static int ji1;
/*[]*/
static int ji2;
/*[]*/
static int ki1;
/*[]*/
static int ki2;
/* Mesh spacings and the tx/ty/tz, dx/dy/dz difference coefficients. */
/*[]*/
static double dxi;
/*[]*/
static double deta;
/*[]*/
static double dzeta;
/*[]*/
static double tx1;
/*[]*/
static double tx2;
/*[]*/
static double tx3;
/*[]*/
static double ty1;
/*[]*/
static double ty2;
/*[]*/
static double ty3;
/*[]*/
static double tz1;
/*[]*/
static double tz2;
/*[]*/
static double tz3;
/*[]*/
static double dx1;
/*[]*/
static double dx2;
/*[]*/
static double dx3;
/*[]*/
static double dx4;
/*[]*/
static double dx5;
/*[]*/
static double dy1;
/*[]*/
static double dy2;
/*[]*/
static double dy3;
/*[]*/
static double dy4;
/*[]*/
static double dy5;
/*[]*/
static double dz1;
/*[]*/
static double dz2;
/*[]*/
static double dz3;
/*[]*/
static double dz4;
/*[]*/
static double dz5;
/*[]*/
static double dssp;
/* Field arrays: solution u, residual/RHS rsd, forcing frct, and flux
   work array; note 12/2*2+1 == 13, so the inner extents are 13. */
/*[]*/
static double u[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static double rsd[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static double frct[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[]*/
static double flux[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/* SSOR iteration controls and convergence norms. */
/*[]*/
static int ipr;
/*[]*/
static int inorm;
/*[]*/
static int itmax;
/*[]*/
static double dt;
/*[]*/
static double omega;
/*[]*/
static double tolrsd[5];
/*[]*/
static double rsdnm[5];
/*[]*/
static double errnm[5];
/*[]*/
static double frc;
/* 5x5 block matrices a,b,c,d -- presumably the Jacobian blocks built
   by jacld/jacu and consumed by blts/buts; TODO confirm. */
/*[]*/
static double a[12][12][5][5];
/*[]*/
static double b[12][12][5][5];
/*[]*/
static double c[12][12][5][5];
/*[]*/
static double d[12][12][5][5];
/*[]*/
static double ce[5][13];
/*[]*/
static double maxtime;
/*[]*/
static boolean flag[12 / 2 * 2 + 1];
/* Forward declarations of the benchmark routines. */
/*[]*/
static void blts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double ldz[12][12][5][5] , double ldy[12][12][5][5] , double ldx[12][12][5][5] , double d[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0);
/*[]*/
static void buts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double tv[12][12][5] , double d[12][12][5][5] , double udx[12][12][5][5] , double udy[12][12][5][5] , double udz[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0);
/*[]*/
static void domain(void );
/*[]*/
static void erhs(void );
/*[]*/
static void error(void );
/*[]*/
static void exact(int i, int j , int k , double u000ijk[5]);
/*[]*/
static void jacld(int k);
/*[]*/
static void jacu(int k);
/*[]*/
static void l2norm(int nx0, int ny0 , int nz0 , int ist , int iend , int jst , int jend , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double sum[5]);
/*[]*/
static void pintgr(void );
/*[]*/
static void read_input(void );
/*[]*/
static void rhs(void );
/*[]*/
static void setbv(void );
/*[]*/
static void setcoeff(void );
/*[]*/
static void setiv(void );
/*[]*/
static void ssor(void );
/*[]*/
static void verify(double xcr[5], double xce[5] , double xci , char *class , boolean *verified);
/*[]*/
/*[]*/
/*[]*/
int main(int argc, char **argv) {
/*[]*/
/*[]*/
char class;
/*[]*/
boolean verified;
/*[]*/
double mflops;
/*[]*/
int nthreads = 1;
/*[]*/
read_input();
/*[]*/
/*[]*/
domain();
/*[]*/
/*[]*/
setcoeff();
/*[]*/
/*[1]*/
#pragma omp parallel
{
/*[1]*/
/*[1]*/
int i;
/*[1]*/
int j;
/*[1]*/
int k;
/*[1]*/
int iglob;
/*[1]*/
int jglob;
/*[1]*/
#pragma omp for nowait
/*[1]*/
/*[1]*/
/*[1]*/
for (i = 0; i < nx; i++) {
/*[1]*/
/*[1]*/
iglob = i;
/*[1]*/
/*[1]*/
/*[1]*/
/*[1]*/
for (j = 0; j < ny; j++) {
/*[1]*/
/*[1]*/
jglob = j;
/*[1]*/
double *_imopVarPre239;
/*[1]*/
_imopVarPre239 = &u[i][j][0][0];
/*[1]*/
exact(iglob, jglob, 0, _imopVarPre239);
/*[1]*/
/*[1]*/
double *_imopVarPre242;
/*[1]*/
int _imopVarPre243;
/*[1]*/
_imopVarPre242 = &u[i][j][nz - 1][0];
/*[1]*/
_imopVarPre243 = nz - 1;
/*[1]*/
exact(iglob, jglob, _imopVarPre243, _imopVarPre242);
/*[1]*/
}
}
/*[1]*/
// #pragma omp dummyFlush BARRIER_START
/*[1]*/
#pragma omp barrier
/*[2]*/
#pragma omp for nowait
/*[2]*/
/*[2]*/
/*[2]*/
for (i = 0; i < nx; i++) {
/*[2]*/
/*[2]*/
iglob = i;
/*[2]*/
/*[2]*/
/*[2]*/
/*[2]*/
for (k = 0; k < nz; k++) {
/*[2]*/
/*[2]*/
double *_imopVarPre245;
/*[2]*/
_imopVarPre245 = &u[i][0][k][0];
/*[2]*/
exact(iglob, 0, k, _imopVarPre245);
/*[2]*/
}
}
/*[2]*/
// #pragma omp dummyFlush BARRIER_START
/*[2]*/
#pragma omp barrier
/*[3]*/
#pragma omp for nowait
/*[3]*/
/*[3]*/
/*[3]*/
for (i = 0; i < nx; i++) {
/*[3]*/
/*[3]*/
iglob = i;
/*[3]*/
/*[3]*/
/*[3]*/
/*[3]*/
for (k = 0; k < nz; k++) {
/*[3]*/
/*[3]*/
double *_imopVarPre248;
/*[3]*/
int _imopVarPre249;
/*[3]*/
_imopVarPre248 = &u[i][ny - 1][k][0];
/*[3]*/
_imopVarPre249 = ny0 - 1;
/*[3]*/
exact(iglob, _imopVarPre249, k, _imopVarPre248);
/*[3]*/
}
}
/*[3]*/
// #pragma omp dummyFlush BARRIER_START
/*[3]*/
#pragma omp barrier
/*[4]*/
#pragma omp for nowait
/*[4]*/
/*[4]*/
/*[4]*/
for (j = 0; j < ny; j++) {
/*[4]*/
/*[4]*/
jglob = j;
/*[4]*/
/*[4]*/
/*[4]*/
/*[4]*/
for (k = 0; k < nz; k++) {
/*[4]*/
/*[4]*/
double *_imopVarPre251;
/*[4]*/
_imopVarPre251 = &u[0][j][k][0];
/*[4]*/
exact(0, jglob, k, _imopVarPre251);
/*[4]*/
}
}
/*[4]*/
// #pragma omp dummyFlush BARRIER_START
/*[4]*/
#pragma omp barrier
/*[5]*/
#pragma omp for nowait
/*[5]*/
/*[5]*/
/*[5]*/
for (j = 0; j < ny; j++) {
/*[5]*/
/*[5]*/
jglob = j;
/*[5]*/
/*[5]*/
/*[5]*/
/*[5]*/
for (k = 0; k < nz; k++) {
/*[5]*/
/*[5]*/
double *_imopVarPre254;
/*[5]*/
int _imopVarPre255;
/*[5]*/
_imopVarPre254 = &u[nx - 1][j][k][0];
/*[5]*/
_imopVarPre255 = nx0 - 1;
/*[5]*/
exact(_imopVarPre255, jglob, k, _imopVarPre254);
/*[5]*/
}
}
}
/*[6]*/
#pragma omp parallel
{
/*[6]*/
/*[6]*/
int i;
/*[6]*/
int j;
/*[6]*/
int k;
/*[6]*/
int m;
/*[6]*/
int iglob;
/*[6]*/
int jglob;
/*[6]*/
double xi;
/*[6]*/
double eta;
/*[6]*/
double zeta;
/*[6]*/
double pxi;
/*[6]*/
double peta;
/*[6]*/
double pzeta;
/*[6]*/
double ue_1jk[5];
/*[6]*/
double ue_nx0jk[5];
/*[6]*/
double ue_i1k[5];
/*[6]*/
double ue_iny0k[5];
/*[6]*/
double ue_ij1[5];
/*[6]*/
double ue_ijnz[5];
/*[6]*/
#pragma omp for nowait
/*[6]*/
/*[6]*/
/*[6]*/
for (j = 0; j < ny; j++) {
/*[6]*/
/*[6]*/
jglob = j;
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (k = 1; k < nz - 1; k++) {
/*[6]*/
/*[6]*/
zeta = ((double) k) / (nz - 1);
/*[6]*/
int _imopVarPre361;
/*[6]*/
_imopVarPre361 = jglob != 0;
/*[6]*/
/*[6]*/
if (_imopVarPre361) {
/*[6]*/
/*[6]*/
_imopVarPre361 = jglob != ny0 - 1;
}
/*[6]*/
/*[6]*/
if (_imopVarPre361) {
/*[6]*/
/*[6]*/
eta = ((double) jglob) / (ny0 - 1);
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (i = 0; i < nx; i++) {
/*[6]*/
/*[6]*/
iglob = i;
/*[6]*/
int _imopVarPre363;
/*[6]*/
_imopVarPre363 = iglob != 0;
/*[6]*/
/*[6]*/
if (_imopVarPre363) {
/*[6]*/
/*[6]*/
_imopVarPre363 = iglob != nx0 - 1;
}
/*[6]*/
/*[6]*/
if (_imopVarPre363) {
/*[6]*/
/*[6]*/
xi = ((double) iglob) / (nx0 - 1);
/*[6]*/
exact(0, jglob, k, ue_1jk);
/*[6]*/
/*[6]*/
int _imopVarPre365;
/*[6]*/
_imopVarPre365 = nx0 - 1;
/*[6]*/
exact(_imopVarPre365, jglob, k, ue_nx0jk);
/*[6]*/
/*[6]*/
exact(iglob, 0, k, ue_i1k);
/*[6]*/
/*[6]*/
int _imopVarPre367;
/*[6]*/
_imopVarPre367 = ny0 - 1;
/*[6]*/
exact(iglob, _imopVarPre367, k, ue_iny0k);
/*[6]*/
/*[6]*/
exact(iglob, jglob, 0, ue_ij1);
/*[6]*/
/*[6]*/
int _imopVarPre369;
/*[6]*/
_imopVarPre369 = nz - 1;
/*[6]*/
exact(iglob, jglob, _imopVarPre369, ue_ijnz);
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
/*[6]*/
for (m = 0; m < 5; m++) {
/*[6]*/
/*[6]*/
pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
/*[6]*/
peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
/*[6]*/
pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
/*[6]*/
u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;
}
}
}
}
}
}
}
/*[6, 7]*/
/*
 * Parallel region: compute the forcing (right-hand-side) term frct[][][][]
 * of the discrete equations.  NOTE(review): matches the NPB LU "erhs"
 * routine — confirm against the enclosing function.
 * Structure (phases separated by explicit barriers because each omp-for
 * carries nowait):
 *   1. zero frct;
 *   2. evaluate the exact-solution polynomial (coefficients ce[][]) into rsd;
 *   3..4. xi-direction: convective fluxes of rsd, then flux differences,
 *         viscous terms and 4th-order artificial dissipation added to frct;
 *   5..6. same for the eta direction;
 *   7. same for the zeta direction (no trailing barrier: end of region).
 * flux[][][][] is used as shared scratch; the barriers order the
 * producer/consumer phases that touch it.
 */
#pragma omp parallel
{
/*[6, 7]*/
/*[6, 7]*/
int i;
/*[6, 7]*/
int j;
/*[6, 7]*/
int k;
/*[6, 7]*/
int m;
/*[6, 7]*/
int iglob;
/*[6, 7]*/
int jglob;
/*[6, 7]*/
int L1;
/*[6, 7]*/
int L2;
/*[6, 7]*/
int ist1;
/*[6, 7]*/
int iend1;
/*[6, 7]*/
int jst1;
/*[6, 7]*/
int jend1;
/*[6, 7]*/
double dsspm;
/*[6, 7]*/
double xi;
/*[6, 7]*/
double eta;
/*[6, 7]*/
double zeta;
/*[6, 7]*/
double q;
/*[6, 7]*/
double u21;
/*[6, 7]*/
double u31;
/*[6, 7]*/
double u41;
/*[6, 7]*/
double tmp;
/*[6, 7]*/
double u21i;
/*[6, 7]*/
double u31i;
/*[6, 7]*/
double u41i;
/*[6, 7]*/
double u51i;
/*[6, 7]*/
double u21j;
/*[6, 7]*/
double u31j;
/*[6, 7]*/
double u41j;
/*[6, 7]*/
double u51j;
/*[6, 7]*/
double u21k;
/*[6, 7]*/
double u31k;
/*[6, 7]*/
double u41k;
/*[6, 7]*/
double u51k;
/*[6, 7]*/
double u21im1;
/*[6, 7]*/
double u31im1;
/*[6, 7]*/
double u41im1;
/*[6, 7]*/
double u51im1;
/*[6, 7]*/
double u21jm1;
/*[6, 7]*/
double u31jm1;
/*[6, 7]*/
double u41jm1;
/*[6, 7]*/
double u51jm1;
/*[6, 7]*/
double u21km1;
/*[6, 7]*/
double u31km1;
/*[6, 7]*/
double u41km1;
/*[6, 7]*/
double u51km1;
/*[6, 7]*/
/* Private copy of the global dissipation coefficient. */
dsspm = dssp;
/*[6, 7]*/
/* Phase 1: zero the forcing term. */
#pragma omp for nowait
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (i = 0; i < nx; i++) {
    /*[6, 7]*/
    /*[6, 7]*/
    /*[6, 7]*/
    /*[6, 7]*/
    /*[6, 7]*/
    for (j = 0; j < ny; j++) {
        /*[6, 7]*/
        /*[6, 7]*/
        /*[6, 7]*/
        /*[6, 7]*/
        /*[6, 7]*/
        for (k = 0; k < nz; k++) {
            /*[6, 7]*/
            /*[6, 7]*/
            /*[6, 7]*/
            /*[6, 7]*/
            /*[6, 7]*/
            for (m = 0; m < 5; m++) {
                /*[6, 7]*/
                /*[6, 7]*/
                frct[i][j][k][m] = 0.0;
            }
        }
    }
}
/*[6, 7]*/
/* Phase 2: rsd := exact-solution polynomial in (xi,eta,zeta), coefficients ce.
 * No barrier needed before this: phase 1 writes frct, phase 2 writes rsd,
 * and both share the same i-partitioning of the omp-for. */
#pragma omp for nowait
/*[6, 7]*/
/*[6, 7]*/
/*[6, 7]*/
for (i = 0; i < nx; i++) {
    /*[6, 7]*/
    /*[6, 7]*/
    iglob = i;
    /*[6, 7]*/
    xi = ((double) iglob) / (nx0 - 1);
    /*[6, 7]*/
    /*[6, 7]*/
    /*[6, 7]*/
    /*[6, 7]*/
    for (j = 0; j < ny; j++) {
        /*[6, 7]*/
        /*[6, 7]*/
        jglob = j;
        /*[6, 7]*/
        eta = ((double) jglob) / (ny0 - 1);
        /*[6, 7]*/
        /*[6, 7]*/
        /*[6, 7]*/
        /*[6, 7]*/
        for (k = 0; k < nz; k++) {
            /*[6, 7]*/
            /*[6, 7]*/
            zeta = ((double) k) / (nz - 1);
            /*[6, 7]*/
            /*[6, 7]*/
            /*[6, 7]*/
            /*[6, 7]*/
            for (m = 0; m < 5; m++) {
                /*[6, 7]*/
                /*[6, 7]*/
                /* Polynomial up to 4th order in each coordinate. */
                rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
            }
        }
    }
}
/*[6, 7]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 7]*/
/* All of rsd must be in place before the flux phases read neighbours. */
#pragma omp barrier
/*[6, 8]*/
L1 = 0;
/*[6, 8]*/
L2 = nx - 1;
/*[6, 8]*/
/* Phase 3: xi-direction convective fluxes of rsd (Euler fluxes of the
 * 5-component state: mass, momenta, energy). */
#pragma omp for nowait
/*[6, 8]*/
/*[6, 8]*/
/*[6, 8]*/
for (i = L1; i <= L2; i++) {
    /*[6, 8]*/
    /*[6, 8]*/
    /*[6, 8]*/
    /*[6, 8]*/
    /*[6, 8]*/
    for (j = jst; j <= jend; j++) {
        /*[6, 8]*/
        /*[6, 8]*/
        /*[6, 8]*/
        /*[6, 8]*/
        /*[6, 8]*/
        for (k = 1; k < nz - 1; k++) {
            /*[6, 8]*/
            /*[6, 8]*/
            flux[i][j][k][0] = rsd[i][j][k][1];
            /*[6, 8]*/
            /* u21 = x-velocity; q = dynamic-pressure-like term. */
            u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
            /*[6, 8]*/
            q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
            /*[6, 8]*/
            flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);
            /*[6, 8]*/
            flux[i][j][k][2] = rsd[i][j][k][2] * u21;
            /*[6, 8]*/
            flux[i][j][k][3] = rsd[i][j][k][3] * u21;
            /*[6, 8]*/
            flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;
        }
    }
}
/*[6, 8]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 8]*/
/* flux written over i; next phase reads flux[i +/- 1] -> must synchronize. */
#pragma omp barrier
/*[6, 9]*/
/* Phase 4: xi-direction differences, viscous terms and 4th-order
 * dissipation accumulated into frct.  Worksharing now over j, so each
 * thread owns whole i-lines. */
#pragma omp for nowait
/*[6, 9]*/
/*[6, 9]*/
/*[6, 9]*/
for (j = jst; j <= jend; j++) {
    /*[6, 9]*/
    /*[6, 9]*/
    /*[6, 9]*/
    /*[6, 9]*/
    /*[6, 9]*/
    for (k = 1; k <= nz - 2; k++) {
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        for (i = ist; i <= iend; i++) {
            /*[6, 9]*/
            /*[6, 9]*/
            /*[6, 9]*/
            /*[6, 9]*/
            /*[6, 9]*/
            for (m = 0; m < 5; m++) {
                /*[6, 9]*/
                /*[6, 9]*/
                /* Central difference of the convective flux. */
                frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
            }
        }
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /* Viscous fluxes from velocity differences; flux[] reused as scratch. */
        for (i = ist; i <= L2; i++) {
            /*[6, 9]*/
            /*[6, 9]*/
            tmp = 1.0 / rsd[i][j][k][0];
            /*[6, 9]*/
            u21i = tmp * rsd[i][j][k][1];
            /*[6, 9]*/
            u31i = tmp * rsd[i][j][k][2];
            /*[6, 9]*/
            u41i = tmp * rsd[i][j][k][3];
            /*[6, 9]*/
            u51i = tmp * rsd[i][j][k][4];
            /*[6, 9]*/
            tmp = 1.0 / rsd[i - 1][j][k][0];
            /*[6, 9]*/
            u21im1 = tmp * rsd[i - 1][j][k][1];
            /*[6, 9]*/
            u31im1 = tmp * rsd[i - 1][j][k][2];
            /*[6, 9]*/
            u41im1 = tmp * rsd[i - 1][j][k][3];
            /*[6, 9]*/
            u51im1 = tmp * rsd[i - 1][j][k][4];
            /*[6, 9]*/
            flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
            /*[6, 9]*/
            flux[i][j][k][2] = tx3 * (u31i - u31im1);
            /*[6, 9]*/
            flux[i][j][k][3] = tx3 * (u41i - u41im1);
            /*[6, 9]*/
            flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
        }
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /* Add viscous-flux differences plus 2nd-difference damping terms. */
        for (i = ist; i <= iend; i++) {
            /*[6, 9]*/
            /*[6, 9]*/
            frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);
            /*[6, 9]*/
            frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);
            /*[6, 9]*/
            frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);
            /*[6, 9]*/
            frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);
            /*[6, 9]*/
            frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);
        }
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /* 4th-order dissipation: one-sided stencils at the i=1,2 boundary... */
        for (m = 0; m < 5; m++) {
            /*[6, 9]*/
            /*[6, 9]*/
            frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);
            /*[6, 9]*/
            frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);
        }
        /*[6, 9]*/
        ist1 = 3;
        /*[6, 9]*/
        iend1 = nx - 4;
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /* ... full 5-point stencil in the interior ... */
        for (i = ist1; i <= iend1; i++) {
            /*[6, 9]*/
            /*[6, 9]*/
            /*[6, 9]*/
            /*[6, 9]*/
            /*[6, 9]*/
            for (m = 0; m < 5; m++) {
                /*[6, 9]*/
                /*[6, 9]*/
                frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);
            }
        }
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /*[6, 9]*/
        /* ... and one-sided stencils at the i=nx-3,nx-2 boundary. */
        for (m = 0; m < 5; m++) {
            /*[6, 9]*/
            /*[6, 9]*/
            frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);
            /*[6, 9]*/
            frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);
        }
    }
}
/*[6, 9]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 9]*/
#pragma omp barrier
/*[6, 10]*/
L1 = 0;
/*[6, 10]*/
L2 = ny - 1;
/*[6, 10]*/
/* Phase 5: eta-direction convective fluxes (u31 = y-velocity). */
#pragma omp for nowait
/*[6, 10]*/
/*[6, 10]*/
/*[6, 10]*/
for (i = ist; i <= iend; i++) {
    /*[6, 10]*/
    /*[6, 10]*/
    /*[6, 10]*/
    /*[6, 10]*/
    /*[6, 10]*/
    for (j = L1; j <= L2; j++) {
        /*[6, 10]*/
        /*[6, 10]*/
        /*[6, 10]*/
        /*[6, 10]*/
        /*[6, 10]*/
        for (k = 1; k <= nz - 2; k++) {
            /*[6, 10]*/
            /*[6, 10]*/
            flux[i][j][k][0] = rsd[i][j][k][2];
            /*[6, 10]*/
            u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
            /*[6, 10]*/
            q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
            /*[6, 10]*/
            flux[i][j][k][1] = rsd[i][j][k][1] * u31;
            /*[6, 10]*/
            flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);
            /*[6, 10]*/
            flux[i][j][k][3] = rsd[i][j][k][3] * u31;
            /*[6, 10]*/
            flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;
        }
    }
}
/*[6, 10]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 10]*/
#pragma omp barrier
/*[6, 11]*/
/* Phase 6: eta-direction differences, viscous terms and dissipation;
 * mirrors phase 4 with j as the stencil direction. */
#pragma omp for nowait
/*[6, 11]*/
/*[6, 11]*/
/*[6, 11]*/
for (i = ist; i <= iend; i++) {
    /*[6, 11]*/
    /*[6, 11]*/
    /*[6, 11]*/
    /*[6, 11]*/
    /*[6, 11]*/
    for (k = 1; k <= nz - 2; k++) {
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        for (j = jst; j <= jend; j++) {
            /*[6, 11]*/
            /*[6, 11]*/
            /*[6, 11]*/
            /*[6, 11]*/
            /*[6, 11]*/
            for (m = 0; m < 5; m++) {
                /*[6, 11]*/
                /*[6, 11]*/
                frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
            }
        }
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        for (j = jst; j <= L2; j++) {
            /*[6, 11]*/
            /*[6, 11]*/
            tmp = 1.0 / rsd[i][j][k][0];
            /*[6, 11]*/
            u21j = tmp * rsd[i][j][k][1];
            /*[6, 11]*/
            u31j = tmp * rsd[i][j][k][2];
            /*[6, 11]*/
            u41j = tmp * rsd[i][j][k][3];
            /*[6, 11]*/
            u51j = tmp * rsd[i][j][k][4];
            /*[6, 11]*/
            tmp = 1.0 / rsd[i][j - 1][k][0];
            /*[6, 11]*/
            u21jm1 = tmp * rsd[i][j - 1][k][1];
            /*[6, 11]*/
            u31jm1 = tmp * rsd[i][j - 1][k][2];
            /*[6, 11]*/
            u41jm1 = tmp * rsd[i][j - 1][k][3];
            /*[6, 11]*/
            u51jm1 = tmp * rsd[i][j - 1][k][4];
            /*[6, 11]*/
            flux[i][j][k][1] = ty3 * (u21j - u21jm1);
            /*[6, 11]*/
            flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
            /*[6, 11]*/
            flux[i][j][k][3] = ty3 * (u41j - u41jm1);
            /*[6, 11]*/
            flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
        }
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        for (j = jst; j <= jend; j++) {
            /*[6, 11]*/
            /*[6, 11]*/
            frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);
            /*[6, 11]*/
            frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);
            /*[6, 11]*/
            frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);
            /*[6, 11]*/
            frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);
            /*[6, 11]*/
            frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);
        }
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        for (m = 0; m < 5; m++) {
            /*[6, 11]*/
            /*[6, 11]*/
            frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);
            /*[6, 11]*/
            frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);
        }
        /*[6, 11]*/
        jst1 = 3;
        /*[6, 11]*/
        jend1 = ny - 4;
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        for (j = jst1; j <= jend1; j++) {
            /*[6, 11]*/
            /*[6, 11]*/
            /*[6, 11]*/
            /*[6, 11]*/
            /*[6, 11]*/
            for (m = 0; m < 5; m++) {
                /*[6, 11]*/
                /*[6, 11]*/
                frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);
            }
        }
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        /*[6, 11]*/
        for (m = 0; m < 5; m++) {
            /*[6, 11]*/
            /*[6, 11]*/
            frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);
            /*[6, 11]*/
            frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);
        }
    }
}
/*[6, 11]*/
// #pragma omp dummyFlush BARRIER_START
/*[6, 11]*/
#pragma omp barrier
/*[6, 12]*/
/* Phase 7: zeta direction.  Flux computation and consumption share the
 * same i-partitioning here, so no intermediate barrier is required. */
#pragma omp for nowait
/*[6, 12]*/
/*[6, 12]*/
/*[6, 12]*/
for (i = ist; i <= iend; i++) {
    /*[6, 12]*/
    /*[6, 12]*/
    /*[6, 12]*/
    /*[6, 12]*/
    /*[6, 12]*/
    for (j = jst; j <= jend; j++) {
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (k = 0; k <= nz - 1; k++) {
            /*[6, 12]*/
            /*[6, 12]*/
            flux[i][j][k][0] = rsd[i][j][k][3];
            /*[6, 12]*/
            u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
            /*[6, 12]*/
            q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
            /*[6, 12]*/
            flux[i][j][k][1] = rsd[i][j][k][1] * u41;
            /*[6, 12]*/
            flux[i][j][k][2] = rsd[i][j][k][2] * u41;
            /*[6, 12]*/
            flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);
            /*[6, 12]*/
            flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;
        }
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (k = 1; k <= nz - 2; k++) {
            /*[6, 12]*/
            /*[6, 12]*/
            /*[6, 12]*/
            /*[6, 12]*/
            /*[6, 12]*/
            for (m = 0; m < 5; m++) {
                /*[6, 12]*/
                /*[6, 12]*/
                frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
            }
        }
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (k = 1; k <= nz - 1; k++) {
            /*[6, 12]*/
            /*[6, 12]*/
            tmp = 1.0 / rsd[i][j][k][0];
            /*[6, 12]*/
            u21k = tmp * rsd[i][j][k][1];
            /*[6, 12]*/
            u31k = tmp * rsd[i][j][k][2];
            /*[6, 12]*/
            u41k = tmp * rsd[i][j][k][3];
            /*[6, 12]*/
            u51k = tmp * rsd[i][j][k][4];
            /*[6, 12]*/
            tmp = 1.0 / rsd[i][j][k - 1][0];
            /*[6, 12]*/
            u21km1 = tmp * rsd[i][j][k - 1][1];
            /*[6, 12]*/
            u31km1 = tmp * rsd[i][j][k - 1][2];
            /*[6, 12]*/
            u41km1 = tmp * rsd[i][j][k - 1][3];
            /*[6, 12]*/
            u51km1 = tmp * rsd[i][j][k - 1][4];
            /*[6, 12]*/
            flux[i][j][k][1] = tz3 * (u21k - u21km1);
            /*[6, 12]*/
            flux[i][j][k][2] = tz3 * (u31k - u31km1);
            /*[6, 12]*/
            flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
            /*[6, 12]*/
            flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
        }
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (k = 1; k <= nz - 2; k++) {
            /*[6, 12]*/
            /*[6, 12]*/
            frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);
            /*[6, 12]*/
            frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);
            /*[6, 12]*/
            frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);
            /*[6, 12]*/
            frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);
            /*[6, 12]*/
            frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);
        }
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (m = 0; m < 5; m++) {
            /*[6, 12]*/
            /*[6, 12]*/
            frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);
            /*[6, 12]*/
            frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);
        }
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (k = 3; k <= nz - 4; k++) {
            /*[6, 12]*/
            /*[6, 12]*/
            /*[6, 12]*/
            /*[6, 12]*/
            /*[6, 12]*/
            for (m = 0; m < 5; m++) {
                /*[6, 12]*/
                /*[6, 12]*/
                frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);
            }
        }
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        /*[6, 12]*/
        for (m = 0; m < 5; m++) {
            /*[6, 12]*/
            /*[6, 12]*/
            frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
            /*[6, 12]*/
            frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
        }
    }
}
}
/*[13]*/
/* Record the actual team size into the global `nthreads`.
 * `master` (not `single`) is used: only thread 0 writes, and no implicit
 * barrier is needed since the region ends immediately afterwards. */
#pragma omp parallel
{
/*[13]*/
/*[13]*/
#pragma omp master
{
    /*[13]*/
    /*[13]*/
    nthreads = omp_get_num_threads();
    /*[13]*/
}
}
/*[13]*/
/* Locals for the iteration loop that follows (NOTE(review): looks like the
 * NPB LU "ssor" driver — confirm against the enclosing function). */
int i;
/*[13]*/
int j;
/*[13]*/
int k;
/*[13]*/
int m;
/*[13]*/
int istep;
/*[13]*/
double tmp;
/*[13]*/
/* Norms of the solution update, one per equation component. */
double delunm[5];
/*[13]*/
/* Scratch block used by the lower/upper triangular solves. */
double tv[12][12][5];
/*[13]*/
/* SSOR scaling factor 1/(omega*(2-omega)); omega is the relaxation
 * parameter, assumed in (0,2) so the denominator is nonzero. */
tmp = 1.0 / (omega * (2.0 - omega));
/*[13, 14]*/
/* Zero the 12x12x5x5 Jacobian blocks a, b, c, d in parallel.
 * i, j, k, m are the enclosing function's variables, made private per
 * thread by the clause so the worksharing loop is race-free. */
#pragma omp parallel private(i, j, k, m)
{
/*[13, 14]*/
/*[13, 14]*/
#pragma omp for nowait
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (i = 0; i < 12; i++) {
    /*[13, 14]*/
    /*[13, 14]*/
    /*[13, 14]*/
    /*[13, 14]*/
    /*[13, 14]*/
    for (j = 0; j < 12; j++) {
        /*[13, 14]*/
        /*[13, 14]*/
        /*[13, 14]*/
        /*[13, 14]*/
        /*[13, 14]*/
        for (k = 0; k < 5; k++) {
            /*[13, 14]*/
            /*[13, 14]*/
            /*[13, 14]*/
            /*[13, 14]*/
            /*[13, 14]*/
            for (m = 0; m < 5; m++) {
                /*[13, 14]*/
                /*[13, 14]*/
                a[i][j][k][m] = 0.0;
                /*[13, 14]*/
                b[i][j][k][m] = 0.0;
                /*[13, 14]*/
                c[i][j][k][m] = 0.0;
                /*[13, 14]*/
                d[i][j][k][m] = 0.0;
            }
        }
    }
}
}
/*[13, 14, 15]*/
#pragma omp parallel
{
/*[13, 14, 15]*/
/*[13, 14, 15]*/
int i_imopVarPre84;
/*[13, 14, 15]*/
int j_imopVarPre85;
/*[13, 14, 15]*/
int k_imopVarPre86;
/*[13, 14, 15]*/
int m_imopVarPre87;
/*[13, 14, 15]*/
int L1;
/*[13, 14, 15]*/
int L2;
/*[13, 14, 15]*/
int ist1;
/*[13, 14, 15]*/
int iend1;
/*[13, 14, 15]*/
int jst1;
/*[13, 14, 15]*/
int jend1;
/*[13, 14, 15]*/
double q;
/*[13, 14, 15]*/
double u21;
/*[13, 14, 15]*/
double u31;
/*[13, 14, 15]*/
double u41;
/*[13, 14, 15]*/
double tmp_imopVarPre88;
/*[13, 14, 15]*/
double u21i;
/*[13, 14, 15]*/
double u31i;
/*[13, 14, 15]*/
double u41i;
/*[13, 14, 15]*/
double u51i;
/*[13, 14, 15]*/
double u21j;
/*[13, 14, 15]*/
double u31j;
/*[13, 14, 15]*/
double u41j;
/*[13, 14, 15]*/
double u51j;
/*[13, 14, 15]*/
double u21k;
/*[13, 14, 15]*/
double u31k;
/*[13, 14, 15]*/
double u41k;
/*[13, 14, 15]*/
double u51k;
/*[13, 14, 15]*/
double u21im1;
/*[13, 14, 15]*/
double u31im1;
/*[13, 14, 15]*/
double u41im1;
/*[13, 14, 15]*/
double u51im1;
/*[13, 14, 15]*/
double u21jm1;
/*[13, 14, 15]*/
double u31jm1;
/*[13, 14, 15]*/
double u41jm1;
/*[13, 14, 15]*/
double u51jm1;
/*[13, 14, 15]*/
double u21km1;
/*[13, 14, 15]*/
double u31km1;
/*[13, 14, 15]*/
double u41km1;
/*[13, 14, 15]*/
double u51km1;
/*[13, 14, 15]*/
#pragma omp for nowait
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
/*[13, 14, 15]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 15]*/
/*[13, 14, 15]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87];
}
}
}
}
/*[13, 14, 15]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 15]*/
#pragma omp barrier
/*[13, 14, 16]*/
L1 = 0;
/*[13, 14, 16]*/
L2 = nx - 1;
/*[13, 14, 16]*/
#pragma omp for nowait
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
/*[13, 14, 16]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 16]*/
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 16]*/
u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 16]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21;
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21;
/*[13, 14, 16]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21;
}
}
}
/*[13, 14, 16]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 16]*/
#pragma omp barrier
/*[13, 14, 17]*/
#pragma omp for nowait
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 17]*/
L2 = nx - 1;
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 17]*/
u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 17]*/
u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 17]*/
u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 17]*/
u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 17]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 17]*/
u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 17]*/
u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 17]*/
u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 17]*/
u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1);
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1);
/*[13, 14, 17]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]);
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]);
}
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 17]*/
rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
/*[13, 14, 17]*/
ist1 = 3;
/*[13, 14, 17]*/
iend1 = nx - 4;
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
/*[13, 14, 17]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 17]*/
/*[13, 14, 17]*/
rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 17]*/
rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[13, 14, 17]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 17]*/
#pragma omp barrier
/*[13, 14, 18]*/
L1 = 0;
/*[13, 14, 18]*/
L2 = ny - 1;
/*[13, 14, 18]*/
#pragma omp for nowait
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
/*[13, 14, 18]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 18]*/
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 18]*/
u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 18]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31;
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31;
/*[13, 14, 18]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31;
}
}
}
/*[13, 14, 18]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 18]*/
/* The flux values written by the previous stage are consumed below. */
#pragma omp barrier
/*[13, 14, 19]*/
/* Eta (y) direction contribution to the residual rsd, per (i,k) column:
 *   1. central difference of the convective flux (ty2 term);
 *   2. viscous fluxes rebuilt in place in flux[..][1..4] from velocity and
 *      energy differences between j and j-1;
 *   3. viscous / second-difference update (dy* ty1 terms);
 *   4. fourth-order artificial dissipation (dssp) with one-sided stencils
 *      at the near-boundary rows j = 1, 2 and j = ny-3, ny-2. */
#pragma omp for nowait
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/* Convective term: central difference of flux in j, scaled by ty2. */
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 19]*/
L2 = ny - 1;
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/* Rebuild flux[..][1..4] as viscous fluxes: backward differences of the
 * specific velocities (u/rho) and energy between rows j and j-1. */
for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 19]*/
u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 19]*/
u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 19]*/
u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 19]*/
u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 19]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0];
/*[13, 14, 19]*/
u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1];
/*[13, 14, 19]*/
u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2];
/*[13, 14, 19]*/
u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3];
/*[13, 14, 19]*/
u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4];
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1);
/*[13, 14, 19]*/
/* 4/3 factor on component 2: eta is the normal direction of this flux. */
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1);
/*[13, 14, 19]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/* Add the viscous terms: forward difference of the viscous flux plus a
 * second difference of u in j, per component (dy1..dy5 coefficients). */
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]);
}
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/* Fourth-order dissipation, one-sided stencils at the low-j boundary
 * (rows 1 and 2 cannot reach j-2). */
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]);
}
/*[13, 14, 19]*/
jst1 = 3;
/*[13, 14, 19]*/
jend1 = ny - 4;
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/* Interior rows: full symmetric five-point (1, -4, 6, -4, 1) stencil. */
for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/*[13, 14, 19]*/
/* One-sided stencils at the high-j boundary (rows ny-3 and ny-2). */
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 19]*/
/*[13, 14, 19]*/
rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
/*[13, 14, 19]*/
rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[13, 14, 19]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 19]*/
/* Finish the eta stage before the zeta (z) stage reuses the flux array. */
#pragma omp barrier
/*[13, 14, 20]*/
/* Zeta (z) direction contribution to rsd.  Same four-step structure as the
 * eta stage above, with k as the sweep index and tz*/dz* coefficients:
 * convective flux build, tz2 central difference, viscous fluxes, and
 * fourth-order dissipation with one-sided boundary stencils in k. */
#pragma omp for nowait
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* Convective flux over the full k extent [0, nz-1]; u[..][3] is the
 * z-momentum and u41 = w the transport velocity. */
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 20]*/
u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 20]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41;
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41;
/*[13, 14, 20]*/
/* Pressure-like term on component 3: zeta is the flux direction here. */
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41;
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* Convective term: central difference of flux in k, scaled by tz2. */
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]);
}
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* Viscous fluxes: backward differences between planes k and k-1. */
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[13, 14, 20]*/
u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[13, 14, 20]*/
u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[13, 14, 20]*/
u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[13, 14, 20]*/
u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[13, 14, 20]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0];
/*[13, 14, 20]*/
u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1];
/*[13, 14, 20]*/
u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2];
/*[13, 14, 20]*/
u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3];
/*[13, 14, 20]*/
u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4];
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1);
/*[13, 14, 20]*/
/* 4/3 factor on component 3: zeta is the normal direction here. */
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[13, 14, 20]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* Viscous / second-difference update (dz1..dz5 coefficients). */
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]);
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* Fourth-order dissipation, one-sided stencils at the low-k boundary. */
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]);
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* Interior planes: full symmetric five-point (1, -4, 6, -4, 1) stencil. */
for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]);
}
}
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/*[13, 14, 20]*/
/* One-sided stencils at the high-k boundary (planes nz-3 and nz-2). */
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[13, 14, 20]*/
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
/*[13, 14, 20]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
}
}
}
}
/*[13, 14, 21]*/
/* Inlined l2norm(): RMS norm per component of the steady-state residual rsd
 * over the interior grid points, written to rsdnm[0..4].  Each thread
 * accumulates private partial sums and merges them in a critical section. */
#pragma omp parallel
{
/*[13, 14, 21]*/
/*[13, 14, 21]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[13, 14, 21]*/
double *sum;
/*[13, 14, 21]*/
/* v and sum are aliases: v walks rsd, sum is the 5-entry output rsdnm. */
v = rsd;
/*[13, 14, 21]*/
sum = rsdnm;
/*[13, 14, 21]*/
int i_imopVarPre75;
/*[13, 14, 21]*/
int j_imopVarPre76;
/*[13, 14, 21]*/
int k_imopVarPre77;
/*[13, 14, 21]*/
int m_imopVarPre78;
/*[13, 14, 21]*/
/* Per-thread partial sums of squares, one per solution component. */
double sum0 = 0.0;
/*[13, 14, 21]*/
double sum1 = 0.0;
/*[13, 14, 21]*/
double sum2 = 0.0;
/*[13, 14, 21]*/
double sum3 = 0.0;
/*[13, 14, 21]*/
double sum4 = 0.0;
/*[13, 14, 21]*/
/* One thread zeroes the accumulators; the barrier below orders this
 * before any thread's partial sums are merged. */
#pragma omp single nowait
{
/*[13, 14, 21]*/
/*[13, 14, 21]*/
/*[13, 14, 21]*/
/*[13, 14, 21]*/
/*[13, 14, 21]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[13, 14, 21]*/
/*[13, 14, 21]*/
sum[m_imopVarPre78] = 0.0;
}
}
/*[13, 14, 21]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 21]*/
#pragma omp barrier
/*[13, 14, 22]*/
#pragma omp for nowait
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) {
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) {
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
/*[13, 14, 22]*/
for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) {
/*[13, 14, 22]*/
/*[13, 14, 22]*/
sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0];
/*[13, 14, 22]*/
sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1];
/*[13, 14, 22]*/
sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2];
/*[13, 14, 22]*/
sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3];
/*[13, 14, 22]*/
sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4];
}
}
}
/*[13, 14, 22]*/
// #pragma omp dummyFlush CRITICAL_START
/*[13, 14, 22]*/
/* Merge this thread's partials; a single critical section keeps the five
 * updates together (manual reduction in place of a reduction clause). */
#pragma omp critical
{
/*[13, 14, 22]*/
/*[13, 14, 22]*/
sum[0] += sum0;
/*[13, 14, 22]*/
sum[1] += sum1;
/*[13, 14, 22]*/
sum[2] += sum2;
/*[13, 14, 22]*/
sum[3] += sum3;
/*[13, 14, 22]*/
sum[4] += sum4;
}
/*[13, 14, 22]*/
// #pragma omp dummyFlush CRITICAL_END
/*[13, 14, 22]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 22]*/
#pragma omp barrier
/*[13, 14, 23]*/
/* RMS: divide by the number of interior points, (n-2)^3, then sqrt. */
#pragma omp single nowait
{
/*[13, 14, 23]*/
/*[13, 14, 23]*/
/*[13, 14, 23]*/
/*[13, 14, 23]*/
/*[13, 14, 23]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[13, 14, 23]*/
/*[13, 14, 23]*/
double _imopVarPre154;
/*[13, 14, 23]*/
double _imopVarPre155;
/*[13, 14, 23]*/
_imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[13, 14, 23]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[13, 14, 23]*/
/*[13, 14, 23]*/
sum[m_imopVarPre78] = _imopVarPre155;
}
}
}
/*[13, 14]*/
/* Timer 1 measures the SSOR iteration loop that follows. */
timer_clear(1);
/*[13, 14]*/
/*[13, 14]*/
timer_start(1);
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
/*[13, 14]*/
for (istep = 1; istep <= itmax; istep++) {
/*[13, 14]*/
/*[13, 14]*/
int _imopVarPre372;
/*[13, 14]*/
int _imopVarPre370;
/*[13, 14]*/
int _imopVarPre371;
/*[13, 14]*/
/* Flattened short-circuit of:
 *   (istep % 20 == 0) || (istep == itmax) || (istep == 1)
 * _imopVarPre370 carries the final truth value, _imopVarPre371 the OR of
 * the two fallback tests.  _imopVarPre372 is declared by the
 * instrumentation tool but unused in this span. */
_imopVarPre370 = istep % 20 == 0;
/*[13, 14]*/
/*[13, 14]*/
if (!_imopVarPre370) {
/*[13, 14]*/
/*[13, 14]*/
_imopVarPre371 = istep == itmax;
/*[13, 14]*/
/*[13, 14]*/
if (!_imopVarPre371) {
/*[13, 14]*/
/*[13, 14]*/
_imopVarPre371 = istep == 1;
}
/*[13, 14]*/
_imopVarPre370 = _imopVarPre371;
}
/*[13, 14]*/
/*[13, 14]*/
if (_imopVarPre370) {
/*[13, 14]*/
/*[13, 14]*/
/* Progress report: every 20th step, plus the first and last step. */
#pragma omp master
{
/*[13, 14]*/
/*[13, 14]*/
printf(" Time step %4d\n", istep);
/*[13, 14]*/
}
}
/*[13, 14, 24]*/
#pragma omp parallel private(istep, i, j, k, m)
{
/*[13, 14, 24]*/
/*[13, 14, 24]*/
int _imopVarPre377;
/*[13, 14, 24]*/
int _imopVarPre378;
/*[13, 14, 24]*/
int _imopVarPre379;
/*[13, 14, 24]*/
int _imopVarPre380;
/*[13, 14, 24]*/
/* Form the SSOR right-hand side: scale the residual by the pseudo-time
 * step dt at every interior point.  Iterations are independent. */
#pragma omp for nowait
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (i = ist; i <= iend; i++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (j = jst; j <= jend; j++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (k = 1; k <= nz - 2; k++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
/*[13, 14, 24]*/
for (m = 0; m < 5; m++) {
/*[13, 14, 24]*/
/*[13, 14, 24]*/
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
/*[13, 14, 24]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 24]*/
#pragma omp barrier
/*[13, 14, 25]*/
/*[13, 14, 25]*/
/*[13, 14, 25]*/
/* Lower-triangular part of the SSOR step, sweeping k upward: jacld(k)
 * forms the block Jacobians for plane k and blts performs the block
 * lower-triangular solve on rsd.  Note there is no worksharing construct
 * here -- every thread executes the k loop; any pipelining/partitioning is
 * presumably internal to jacld/blts (confirm in their definitions). */
for (k = 1; k <= nz - 2; k++) {
/*[13, 14, 25]*/
/*[13, 14, 25]*/
jacld(k);
/*[13, 14, 25]*/
/*[13, 14, 25]*/
blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0);
/*[13, 14, 25]*/
}
/*[13, 14, 25]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 25]*/
#pragma omp barrier
/*[13, 14, 26]*/
/*[13, 14, 26]*/
/*[13, 14, 26]*/
/* Upper-triangular part of the SSOR step, sweeping k downward: jacu(k)
 * forms the upper block Jacobians and buts back-substitutes into rsd.
 * As with the lower sweep, no worksharing construct appears here; the
 * parallel coordination is presumably inside jacu/buts (confirm there). */
for (k = nz - 2; k >= 1; k--) {
/*[13, 14, 26]*/
/*[13, 14, 26]*/
jacu(k);
/*[13, 14, 26]*/
/*[13, 14, 26]*/
buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0);
/*[13, 14, 26]*/
}
/*[13, 14, 26]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 26]*/
#pragma omp barrier
/*[13, 14, 27]*/
/* Apply the SSOR correction: u += tmp * rsd at every interior point.
 * tmp is set outside this view (in the NPB SSOR algorithm it is
 * 1/(omega*(2-omega)) -- TODO(review): confirm where tmp is assigned). */
#pragma omp for nowait
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (i = ist; i <= iend; i++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (j = jst; j <= jend; j++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (k = 1; k <= nz - 2; k++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (m = 0; m < 5; m++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];
}
}
}
}
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/* Every inorm steps, compute the RMS norm of the Newton/SSOR update rsd
 * into delunm (inlined l2norm, same pattern as the rsdnm block earlier).
 * NOTE(review): istep is listed in the enclosing region's private() clause
 * (see the "#pragma omp parallel private(istep, ...)" above), so per OpenMP
 * its value here is unspecified unless the instrumentation guarantees it --
 * if threads disagree on this condition the barriers below would deadlock.
 * Confirm against the original benchmark source. */
if (istep % inorm == 0) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[13, 14, 27]*/
double *sum;
/*[13, 14, 27]*/
/* Aliases: v walks rsd, sum is the 5-entry output delunm. */
v = rsd;
/*[13, 14, 27]*/
sum = delunm;
/*[13, 14, 27]*/
int i_imopVarPre89;
/*[13, 14, 27]*/
int j_imopVarPre90;
/*[13, 14, 27]*/
int k_imopVarPre91;
/*[13, 14, 27]*/
int m_imopVarPre92;
/*[13, 14, 27]*/
/* Per-thread partial sums of squares, one per solution component. */
double sum0 = 0.0;
/*[13, 14, 27]*/
double sum1 = 0.0;
/*[13, 14, 27]*/
double sum2 = 0.0;
/*[13, 14, 27]*/
double sum3 = 0.0;
/*[13, 14, 27]*/
double sum4 = 0.0;
/*[13, 14, 27]*/
#pragma omp single nowait
{
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
/*[13, 14, 27]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[13, 14, 27]*/
/*[13, 14, 27]*/
sum[m_imopVarPre92] = 0.0;
}
}
/*[13, 14, 27]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 27]*/
#pragma omp barrier
/*[13, 14, 28]*/
#pragma omp for nowait
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) {
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) {
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
/*[13, 14, 28]*/
for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) {
/*[13, 14, 28]*/
/*[13, 14, 28]*/
sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0];
/*[13, 14, 28]*/
sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1];
/*[13, 14, 28]*/
sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2];
/*[13, 14, 28]*/
sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3];
/*[13, 14, 28]*/
sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4];
}
}
}
/*[13, 14, 28]*/
// #pragma omp dummyFlush CRITICAL_START
/*[13, 14, 28]*/
/* Manual reduction: merge this thread's partial sums under one lock. */
#pragma omp critical
{
/*[13, 14, 28]*/
/*[13, 14, 28]*/
sum[0] += sum0;
/*[13, 14, 28]*/
sum[1] += sum1;
/*[13, 14, 28]*/
sum[2] += sum2;
/*[13, 14, 28]*/
sum[3] += sum3;
/*[13, 14, 28]*/
sum[4] += sum4;
}
/*[13, 14, 28]*/
// #pragma omp dummyFlush CRITICAL_END
/*[13, 14, 28]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 28]*/
#pragma omp barrier
/*[13, 14, 29]*/
/* RMS: divide by the (n-2)^3 interior points, then sqrt. */
#pragma omp single nowait
{
/*[13, 14, 29]*/
/*[13, 14, 29]*/
/*[13, 14, 29]*/
/*[13, 14, 29]*/
/*[13, 14, 29]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[13, 14, 29]*/
/*[13, 14, 29]*/
double _imopVarPre154;
/*[13, 14, 29]*/
double _imopVarPre155;
/*[13, 14, 29]*/
_imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[13, 14, 29]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[13, 14, 29]*/
/*[13, 14, 29]*/
sum[m_imopVarPre92] = _imopVarPre155;
}
}
/*[13, 14, 29]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 29]*/
#pragma omp barrier
/*[13, 14, 30]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 30]*/
#pragma omp barrier
}
/*[13, 14, 27, 31]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 27, 31]*/
#pragma omp barrier
/*[13, 14, 28, 32]*/
/* Locals for the inlined rhs() recomputation that follows: loop indices,
 * per-direction sweep bounds, and scalar scratch values.  Naming scheme for
 * the scratch values: uN{i,j,k} is conserved component N divided by density
 * at the current point in the given sweep direction; uN{i,j,k}m1 is the same
 * at the previous point (i-1 / j-1 / k-1). */
int i_imopVarPre79;
/*[13, 14, 28, 32]*/
int j_imopVarPre80;
/*[13, 14, 28, 32]*/
int k_imopVarPre81;
/*[13, 14, 28, 32]*/
int m_imopVarPre82;
/*[13, 14, 28, 32]*/
/* Inclusive sweep bounds, set per direction before each loop nest. */
int L1;
/*[13, 14, 28, 32]*/
int L2;
/*[13, 14, 28, 32]*/
int ist1;
/*[13, 14, 28, 32]*/
int iend1;
/*[13, 14, 28, 32]*/
int jst1;
/*[13, 14, 28, 32]*/
int jend1;
/*[13, 14, 28, 32]*/
/* q = 0.5*|momentum|^2/density; u21/u31/u41 are the transport velocities. */
double q;
/*[13, 14, 28, 32]*/
double u21;
/*[13, 14, 28, 32]*/
double u31;
/*[13, 14, 28, 32]*/
double u41;
/*[13, 14, 28, 32]*/
/* Holds 1/density for the point currently being differenced. */
double tmp_imopVarPre83;
/*[13, 14, 28, 32]*/
double u21i;
/*[13, 14, 28, 32]*/
double u31i;
/*[13, 14, 28, 32]*/
double u41i;
/*[13, 14, 28, 32]*/
double u51i;
/*[13, 14, 28, 32]*/
double u21j;
/*[13, 14, 28, 32]*/
double u31j;
/*[13, 14, 28, 32]*/
double u41j;
/*[13, 14, 28, 32]*/
double u51j;
/*[13, 14, 28, 32]*/
double u21k;
/*[13, 14, 28, 32]*/
double u31k;
/*[13, 14, 28, 32]*/
double u41k;
/*[13, 14, 28, 32]*/
double u51k;
/*[13, 14, 28, 32]*/
double u21im1;
/*[13, 14, 28, 32]*/
double u31im1;
/*[13, 14, 28, 32]*/
double u41im1;
/*[13, 14, 28, 32]*/
double u51im1;
/*[13, 14, 28, 32]*/
double u21jm1;
/*[13, 14, 28, 32]*/
double u31jm1;
/*[13, 14, 28, 32]*/
double u41jm1;
/*[13, 14, 28, 32]*/
double u51jm1;
/*[13, 14, 28, 32]*/
double u21km1;
/*[13, 14, 28, 32]*/
double u31km1;
/*[13, 14, 28, 32]*/
double u41km1;
/*[13, 14, 28, 32]*/
double u51km1;
/*[13, 14, 28, 32]*/
/* Start of the inlined rhs(): initialize the residual to the negative
 * forcing term, rsd = -frct, over the whole grid (all boundary points
 * included) before the directional flux contributions are added. */
#pragma omp for nowait
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 28, 32]*/
/*[13, 14, 28, 32]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82];
}
}
}
}
/*[13, 14, 28, 32]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 28, 32]*/
#pragma omp barrier
/*[13, 14, 29, 33]*/
/* Xi (x) direction convective flux, over the full i extent [0, nx-1] so the
 * i-1 / i+1 central differences in the next stage have valid neighbours.
 * u[..][1] is the x-momentum; u21 = x-velocity. */
L1 = 0;
/*[13, 14, 29, 33]*/
L2 = nx - 1;
/*[13, 14, 29, 33]*/
#pragma omp for nowait
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 29, 33]*/
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 29, 33]*/
u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 29, 33]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 29, 33]*/
/* Pressure-like term 0.4*(E - q) on component 1: xi is the flux direction. */
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21;
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21;
/*[13, 14, 29, 33]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21;
}
}
}
/*[13, 14, 29, 33]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 29, 33]*/
/* All threads must finish writing flux[] before it is differenced below. */
#pragma omp barrier
/*[13, 14, 30, 34]*/
/*
 * xi-direction contribution to the residual rsd, per (j, k) line:
 *  1) subtract tx2 * central difference of the convective flux in i;
 *  2) rebuild flux[1..4] as viscous-type differences between i and i-1
 *     of the velocity components u21i/u31i/u41i (and u51i from energy),
 *     scaled by tx3; the (4/3) factor sits on the i-normal component;
 *  3) add the flux-difference and second-difference (dx*·tx1) terms;
 *  4) apply a dssp-scaled five-point u[i-2]-4u[i-1]+6u[i]-4u[i+1]+u[i+2]
 *     smoothing stencil, with one-sided variants at i = 1, 2 and
 *     i = nx-3, nx-2 (fourth-difference artificial dissipation).
 */
#pragma omp for nowait
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 30, 34]*/
L2 = nx - 1;
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/* Viscous-type fluxes from backward differences of velocities in i. */
for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 30, 34]*/
u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 30, 34]*/
u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 30, 34]*/
u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 30, 34]*/
u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 30, 34]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 30, 34]*/
u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 30, 34]*/
u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 30, 34]*/
u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 30, 34]*/
u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1);
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1);
/*[13, 14, 30, 34]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/* Add viscous flux differences and second-difference terms to rsd. */
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]);
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]);
}
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/* One-sided dissipation stencils at the near boundary (i = 1, 2). */
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 30, 34]*/
rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
/*[13, 14, 30, 34]*/
ist1 = 3;
/*[13, 14, 30, 34]*/
iend1 = nx - 4;
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/* Full five-point dissipation stencil in the interior i = 3 .. nx-4. */
for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
/* One-sided dissipation stencils at the far boundary (i = nx-3, nx-2). */
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 30, 34]*/
/*[13, 14, 30, 34]*/
rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 30, 34]*/
rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[13, 14, 30, 34]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 30, 34]*/
/* Synchronize before flux[] is rebuilt for the eta direction. */
#pragma omp barrier
/*[13, 14, 31, 35]*/
L1 = 0;
/*[13, 14, 31, 35]*/
L2 = ny - 1;
/*[13, 14, 31, 35]*/
/*
 * eta-direction (j) convective fluxes, same structure as the
 * xi-direction pass but built from the j-velocity u31 = u[2]/u[0];
 * the pressure-like 0.4*(u[4]-q) term is added to the j-momentum
 * component (flux[2]) here.
 */
#pragma omp for nowait
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 31, 35]*/
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 31, 35]*/
u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 31, 35]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31;
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31;
/*[13, 14, 31, 35]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31;
}
}
}
/*[13, 14, 31, 35]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 31, 35]*/
/* All threads must finish writing eta flux[] before it is differenced. */
#pragma omp barrier
/*[13, 14, 32, 36]*/
/*
 * eta-direction contribution to rsd, per (i, k) line: mirrors the
 * xi-direction pass with j-differences -- ty2 * central convective flux
 * difference, ty3-scaled viscous differences (the 4/3 factor now on the
 * j-normal component u31j), dy*·ty1 second differences, and dssp-scaled
 * fourth-difference dissipation with one-sided stencils at j = 1, 2 and
 * j = ny-3, ny-2.
 */
#pragma omp for nowait
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 32, 36]*/
L2 = ny - 1;
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/* Viscous-type fluxes from backward differences of velocities in j. */
for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 32, 36]*/
u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 32, 36]*/
u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 32, 36]*/
u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 32, 36]*/
u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 32, 36]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0];
/*[13, 14, 32, 36]*/
u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1];
/*[13, 14, 32, 36]*/
u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2];
/*[13, 14, 32, 36]*/
u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3];
/*[13, 14, 32, 36]*/
u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4];
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1);
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1);
/*[13, 14, 32, 36]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/* Add viscous flux differences and second-difference terms to rsd. */
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]);
}
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/* One-sided dissipation stencils at the near boundary (j = 1, 2). */
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]);
}
/*[13, 14, 32, 36]*/
jst1 = 3;
/*[13, 14, 32, 36]*/
jend1 = ny - 4;
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/* Full five-point dissipation stencil in the interior j = 3 .. ny-4. */
for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
/* One-sided dissipation stencils at the far boundary (j = ny-3, ny-2). */
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 32, 36]*/
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
/*[13, 14, 32, 36]*/
rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[13, 14, 32, 36]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 32, 36]*/
/* Synchronize before flux[] is rebuilt for the zeta direction. */
#pragma omp barrier
/*[13, 14, 33, 37]*/
/*
 * zeta-direction (k) pass.  Unlike the xi/eta passes, all stages for one
 * (i, j) column are done inside a single loop nest: convective fluxes
 * over k = 0..nz-1 (built from u41 = u[3]/u[0], pressure term on the
 * k-momentum flux[3]), tz2 * central convective differences, tz3-scaled
 * viscous differences (4/3 factor on u41k), dz*·tz1 second differences,
 * and dssp-scaled fourth-difference dissipation with one-sided stencils
 * at k = 1, 2 and k = nz-3, nz-2.
 */
#pragma omp for nowait
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* Convective fluxes over the whole k range, including both ends. */
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 33, 37]*/
u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 33, 37]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41;
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41;
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41;
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* Convective central differences in k on the interior points. */
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]);
}
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* Viscous-type fluxes from backward differences of velocities in k. */
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[13, 14, 33, 37]*/
u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[13, 14, 33, 37]*/
u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[13, 14, 33, 37]*/
u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[13, 14, 33, 37]*/
u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[13, 14, 33, 37]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0];
/*[13, 14, 33, 37]*/
u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1];
/*[13, 14, 33, 37]*/
u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2];
/*[13, 14, 33, 37]*/
u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3];
/*[13, 14, 33, 37]*/
u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4];
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[13, 14, 33, 37]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* Add viscous flux differences and second-difference terms to rsd. */
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]);
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* One-sided dissipation stencils at the near boundary (k = 1, 2). */
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]);
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* Full five-point dissipation stencil in the interior k = 3 .. nz-4. */
for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]);
}
}
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
/* One-sided dissipation stencils at the far boundary (k = nz-3, nz-2). */
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[13, 14, 33, 37]*/
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
/*[13, 14, 33, 37]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
}
}
}
/*[13, 14, 33, 37]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 33, 37]*/
#pragma omp barrier
/*[13, 14, 34, 38]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 34, 38]*/
#pragma omp barrier
/*[13, 14, 35, 39]*/
/*
 * Decide whether to compute the residual norm this iteration: every
 * `inorm` steps, or on the final step (istep == itmax).  Only the master
 * thread writes the flag _imopVarPre372 (presumably shared -- hence the
 * barriers on both sides so all threads read a consistent value below).
 */
#pragma omp master
{
/*[13, 14, 35, 39]*/
/*[13, 14, 35, 39]*/
_imopVarPre372 = (istep % inorm == 0);
/*[13, 14, 35, 39]*/
/*[13, 14, 35, 39]*/
if (!_imopVarPre372) {
/*[13, 14, 35, 39]*/
/*[13, 14, 35, 39]*/
_imopVarPre372 = (istep == itmax);
}
}
/*[13, 14, 35, 39]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 35, 39]*/
#pragma omp barrier
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*
 * Inlined L2-norm of the residual: every thread accumulates partial
 * sums of squares of rsd (aliased as v) over its share of the interior
 * points, reduces them into sum[] (aliased rsdnm) under a critical
 * section, and one thread finishes with the RMS:
 *   sum[m] = sqrt(sum[m] / ((nx0-2)*(ny0-2)*(nz0-2))).
 */
if (_imopVarPre372) {
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[13, 14, 36, 40]*/
double *sum;
/*[13, 14, 36, 40]*/
v = rsd;
/*[13, 14, 36, 40]*/
sum = rsdnm;
/*[13, 14, 36, 40]*/
int i_imopVarPre93;
/*[13, 14, 36, 40]*/
int j_imopVarPre94;
/*[13, 14, 36, 40]*/
int k_imopVarPre95;
/*[13, 14, 36, 40]*/
int m_imopVarPre96;
/*[13, 14, 36, 40]*/
/* Per-thread partial sums, one per solution component. */
double sum0 = 0.0;
/*[13, 14, 36, 40]*/
double sum1 = 0.0;
/*[13, 14, 36, 40]*/
double sum2 = 0.0;
/*[13, 14, 36, 40]*/
double sum3 = 0.0;
/*[13, 14, 36, 40]*/
double sum4 = 0.0;
/*[13, 14, 36, 40]*/
/* One thread zeroes the shared accumulator before the reduction. */
#pragma omp single nowait
{
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[13, 14, 36, 40]*/
/*[13, 14, 36, 40]*/
sum[m_imopVarPre96] = 0.0;
}
}
/*[13, 14, 36, 40]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 36, 40]*/
#pragma omp barrier
/*[13, 14, 37]*/
#pragma omp for nowait
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
/*[13, 14, 37]*/
for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0];
/*[13, 14, 37]*/
sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1];
/*[13, 14, 37]*/
sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2];
/*[13, 14, 37]*/
sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3];
/*[13, 14, 37]*/
sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4];
}
}
}
/*[13, 14, 37]*/
// #pragma omp dummyFlush CRITICAL_START
/*[13, 14, 37]*/
/* Reduce per-thread partials into the shared sum[] one thread at a time. */
#pragma omp critical
{
/*[13, 14, 37]*/
/*[13, 14, 37]*/
sum[0] += sum0;
/*[13, 14, 37]*/
sum[1] += sum1;
/*[13, 14, 37]*/
sum[2] += sum2;
/*[13, 14, 37]*/
sum[3] += sum3;
/*[13, 14, 37]*/
sum[4] += sum4;
}
/*[13, 14, 37]*/
// #pragma omp dummyFlush CRITICAL_END
/*[13, 14, 37]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 37]*/
#pragma omp barrier
/*[13, 14, 38]*/
/* One thread converts the sums of squares into RMS values. */
#pragma omp single nowait
{
/*[13, 14, 38]*/
/*[13, 14, 38]*/
/*[13, 14, 38]*/
/*[13, 14, 38]*/
/*[13, 14, 38]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[13, 14, 38]*/
/*[13, 14, 38]*/
double _imopVarPre154;
/*[13, 14, 38]*/
double _imopVarPre155;
/*[13, 14, 38]*/
_imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[13, 14, 38]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[13, 14, 38]*/
/*[13, 14, 38]*/
sum[m_imopVarPre96] = _imopVarPre155;
}
}
/*[13, 14, 38]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 38]*/
#pragma omp barrier
/*[13, 14, 39]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 39]*/
#pragma omp barrier
}
/*[13, 14, 36, 40]*/
// #pragma omp dummyFlush BARRIER_START
/*[13, 14, 36, 40]*/
#pragma omp barrier
/*[13, 14, 37]*/
/*
 * Convergence test on the master thread: _imopVarPre377 becomes true
 * only when ALL five residual norms are below their tolerances
 * (the nested ifs are a short-circuit && expanded by the instrumenter).
 * NOTE(review): convergence terminates the run via exit(1) -- a nonzero
 * status for the success path looks intentional here (early stop), but
 * worth confirming against the original benchmark source.
 */
#pragma omp master
{
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre377 = (rsdnm[0] < tolrsd[0]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre377) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre378 = (rsdnm[1] < tolrsd[1]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre378) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre379 = (rsdnm[2] < tolrsd[2]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre379) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre380 = (rsdnm[3] < tolrsd[3]);
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre380) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
_imopVarPre380 = (rsdnm[4] < tolrsd[4]);
}
/*[13, 14, 37]*/
_imopVarPre379 = _imopVarPre380;
}
/*[13, 14, 37]*/
_imopVarPre378 = _imopVarPre379;
}
/*[13, 14, 37]*/
_imopVarPre377 = _imopVarPre378;
}
/*[13, 14, 37]*/
/*[13, 14, 37]*/
if (_imopVarPre377) {
/*[13, 14, 37]*/
/*[13, 14, 37]*/
exit(1);
/*[13, 14, 37]*/
}
}
}
}
}
/*[13, 14]*/
timer_stop(1);
/*[13, 14]*/
/*[13, 14]*/
maxtime = timer_read(1);
/*[13, 14]*/
/*[]*/
error();
/*[]*/
/*[]*/
pintgr();
/*[]*/
/*[]*/
int *_imopVarPre144;
/*[]*/
char *_imopVarPre145;
/*[]*/
_imopVarPre144 = &verified;
/*[]*/
_imopVarPre145 = &class;
/*[]*/
verify(rsdnm, errnm, frc, _imopVarPre145, _imopVarPre144);
/*[]*/
/*[]*/
mflops = (double) itmax * (1984.77 * (double) nx0 * (double) ny0 * (double) nz0 - 10923.3 * (((double) (nx0 + ny0 + nz0) / 3.0) * ((double) (nx0 + ny0 + nz0) / 3.0)) + 27770.9 * (double) (nx0 + ny0 + nz0) / 3.0 - 144010.0) / (maxtime * 1000000.0);
/*[]*/
c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)");
/*[]*/
}
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*
 * blts: forward ("lower-triangular") sweep applied to grid plane k.
 *
 * Structure visible in the code:
 *   1) v[i][j][k][*] is relaxed against the k-1 plane through the 5x5
 *      blocks ldz[i][j] (worksharing loop, no barrier).
 *   2) A second worksharing loop couples each (i,j) point to its j-1 and
 *      i-1 neighbours through ldy/ldx, then solves a dense 5x5 linear
 *      system whose matrix is a copy of d[i][j] (hand-unrolled Gaussian
 *      elimination with back substitution), overwriting v[i][j][k][*]
 *      with the solution.
 *
 * Because row i reads v[i-1][j][k][*], the i-rows are processed as a
 * software pipeline: each thread spin-waits on the shared array `flag`
 * (declared elsewhere in this file) until the thread owning row i-1 has
 * finished it, and publishes its own completion the same way.  The
 * `#pragma omp flush(flag)` directives force the flag values to be
 * re-read/made visible; do not reorder or remove them.
 *
 * NOTE(review): this looks like the standard NPB-LU blts kernel
 * (block lower-triangular solve of the SSOR iteration) -- inferred from
 * structure, confirm against the reference implementation.
 *
 * Parameters: k selects the plane; omega is the relaxation factor;
 * ist..iend / jst..jend bound the interior points; nx, ny, nz, nx0, ny0
 * are carried for interface compatibility (nx0/ny0 unused here).
 */
static void blts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double ldz[12][12][5][5] , double ldy[12][12][5][5] , double ldx[12][12][5][5] , double d[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0) {
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5];  /* local dense copy of the diagonal block d[i][j] */
    /* Phase 1: subtract the k-1 plane contribution (independent per (i,j)). */
    #pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]);
            }
        }
    }
    /* Phase 2: pipelined sweep over i (ascending); row i depends on row i-1. */
    #pragma omp for nowait schedule(static)
    for (i = ist; i <= iend; i++) {
        /* Wait until the producer of row i-1 signals completion. */
        if (i != ist) {
            while (flag[i - 1] == 0) {
                #pragma omp flush(flag)
                ;
            }
        }
        /* Wait until the consumer of our previous signal has cleared it. */
        if (i != iend) {
            while (flag[i] == 1) {
                #pragma omp flush(flag)
                ;
            }
        }
        for (j = jst; j <= jend; j++) {
            /* Couple to the j-1 (ldy) and i-1 (ldx) neighbours. */
            for (m = 0; m < 5; m++) {
                v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]);
            }
            /* Copy the 5x5 diagonal block; it is destroyed by the elimination. */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Forward elimination, pivot column 0 (no pivoting: diagonal
             * entries are assumed nonzero -- TODO confirm with the block
             * construction elsewhere in the file). */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp;
            /* Pivot column 1. */
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp;
            /* Pivot column 2. */
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp;
            /* Pivot column 3. */
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp;
            /* Back substitution, components 4 down to 0. */
            v[i][j][k][4] = v[i][j][k][4] / tmat[4][4];
            v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4];
            v[i][j][k][3] = v[i][j][k][3] / tmat[3][3];
            v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4];
            v[i][j][k][2] = v[i][j][k][2] / tmat[2][2];
            v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4];
            v[i][j][k][1] = v[i][j][k][1] / tmat[1][1];
            v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4];
            v[i][j][k][0] = v[i][j][k][0] / tmat[0][0];
        }
        /* Pipeline handshake: acknowledge row i-1, then publish row i. */
        if (i != ist) {
            flag[i - 1] = 0;
        }
        if (i != iend) {
            flag[i] = 1;
        }
        #pragma omp flush(flag)
    }
}
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*
 * buts: backward ("upper-triangular") sweep applied to grid plane k --
 * the mirror image of blts.
 *
 * Structure visible in the code:
 *   1) tv[i][j][*] is seeded from the k+1 plane through the 5x5 blocks
 *      udz[i][j] (worksharing loop, no barrier).
 *   2) A second worksharing loop, iterating i and j DOWNWARD, adds the
 *      j+1 (udy) and i+1 (udx) neighbour contributions into tv, solves a
 *      dense 5x5 system whose matrix is a copy of d[i][j] (hand-unrolled
 *      Gaussian elimination on tv), and finally applies the correction
 *      v[i][j][k][m] -= tv[i][j][m].
 *
 * Because row i reads v[i+1][j][k][*], rows are pipelined in descending
 * order through the shared `flag` array (declared elsewhere): spin-wait
 * for row i+1, publish row i.  The `#pragma omp flush(flag)` directives
 * keep the flags coherent between threads; do not reorder or remove them.
 *
 * NOTE(review): this looks like the standard NPB-LU buts kernel (block
 * upper-triangular solve of the SSOR iteration) -- inferred from
 * structure, confirm against the reference implementation.
 */
static void buts(int nx, int ny , int nz , int k , double omega , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double tv[12][12][5] , double d[12][12][5][5] , double udx[12][12][5][5] , double udy[12][12][5][5] , double udz[12][12][5][5] , int ist , int iend , int jst , int jend , int nx0 , int ny0) {
    int i;
    int j;
    int m;
    double tmp;
    double tmp1;
    double tmat[5][5];  /* local dense copy of the diagonal block d[i][j] */
    /* Phase 1: seed tv with the k+1 plane contribution (independent per (i,j)). */
    #pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        for (j = jend; j >= jst; j--) {
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]);
            }
        }
    }
    /* Phase 2: pipelined sweep over i (descending); row i depends on row i+1. */
    #pragma omp for nowait schedule(static)
    for (i = iend; i >= ist; i--) {
        /* Wait until the producer of row i+1 signals completion. */
        if (i != iend) {
            while (flag[i + 1] == 0) {
                #pragma omp flush(flag)
                ;
            }
        }
        /* Wait until the consumer of our previous signal has cleared it. */
        if (i != ist) {
            while (flag[i] == 1) {
                #pragma omp flush(flag)
                ;
            }
        }
        for (j = jend; j >= jst; j--) {
            /* Couple to the j+1 (udy) and i+1 (udx) neighbours. */
            for (m = 0; m < 5; m++) {
                tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]);
            }
            /* Copy the 5x5 diagonal block; it is destroyed by the elimination. */
            for (m = 0; m < 5; m++) {
                tmat[m][0] = d[i][j][m][0];
                tmat[m][1] = d[i][j][m][1];
                tmat[m][2] = d[i][j][m][2];
                tmat[m][3] = d[i][j][m][3];
                tmat[m][4] = d[i][j][m][4];
            }
            /* Forward elimination, pivot column 0 (no pivoting). */
            tmp1 = 1.0 / tmat[0][0];
            tmp = tmp1 * tmat[1][0];
            tmat[1][1] = tmat[1][1] - tmp * tmat[0][1];
            tmat[1][2] = tmat[1][2] - tmp * tmat[0][2];
            tmat[1][3] = tmat[1][3] - tmp * tmat[0][3];
            tmat[1][4] = tmat[1][4] - tmp * tmat[0][4];
            tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[2][0];
            tmat[2][1] = tmat[2][1] - tmp * tmat[0][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[0][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[0][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[0][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[3][0];
            tmat[3][1] = tmat[3][1] - tmp * tmat[0][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[0][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[0][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[0][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp;
            tmp = tmp1 * tmat[4][0];
            tmat[4][1] = tmat[4][1] - tmp * tmat[0][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[0][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[0][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[0][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp;
            /* Pivot column 1. */
            tmp1 = 1.0 / tmat[1][1];
            tmp = tmp1 * tmat[2][1];
            tmat[2][2] = tmat[2][2] - tmp * tmat[1][2];
            tmat[2][3] = tmat[2][3] - tmp * tmat[1][3];
            tmat[2][4] = tmat[2][4] - tmp * tmat[1][4];
            tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[3][1];
            tmat[3][2] = tmat[3][2] - tmp * tmat[1][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[1][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[1][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp;
            tmp = tmp1 * tmat[4][1];
            tmat[4][2] = tmat[4][2] - tmp * tmat[1][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[1][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[1][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp;
            /* Pivot column 2. */
            tmp1 = 1.0 / tmat[2][2];
            tmp = tmp1 * tmat[3][2];
            tmat[3][3] = tmat[3][3] - tmp * tmat[2][3];
            tmat[3][4] = tmat[3][4] - tmp * tmat[2][4];
            tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp;
            tmp = tmp1 * tmat[4][2];
            tmat[4][3] = tmat[4][3] - tmp * tmat[2][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[2][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp;
            /* Pivot column 3. */
            tmp1 = 1.0 / tmat[3][3];
            tmp = tmp1 * tmat[4][3];
            tmat[4][4] = tmat[4][4] - tmp * tmat[3][4];
            tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp;
            /* Back substitution, components 4 down to 0. */
            tv[i][j][4] = tv[i][j][4] / tmat[4][4];
            tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4];
            tv[i][j][3] = tv[i][j][3] / tmat[3][3];
            tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4];
            tv[i][j][2] = tv[i][j][2] / tmat[2][2];
            tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4];
            tv[i][j][1] = tv[i][j][1] / tmat[1][1];
            tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4];
            tv[i][j][0] = tv[i][j][0] / tmat[0][0];
            /* Apply the computed correction to the solution plane. */
            v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
            v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
            v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
            v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
            v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
        }
        /* Pipeline handshake: acknowledge row i+1, then publish row i. */
        if (i != iend) {
            flag[i + 1] = 0;
        }
        if (i != ist) {
            flag[i] = 1;
        }
        #pragma omp flush(flag)
    }
}
/*[]*/
/*
 * domain: copy the global problem dimensions (nx0, ny0, nz0) into the
 * working sizes (nx, ny, nz), validate that each lies in [4, 12], and
 * set the interior loop bounds ist/iend and jst/jend.  Terminates the
 * program with exit(1) and a diagnostic if the size check fails.
 */
static void domain() {
    nx = nx0;
    ny = ny0;
    nz = nz0;
    /* Each dimension must be at least 4 grid points. */
    if (nx < 4 || ny < 4 || nz < 4) {
        printf(" SUBDOMAIN SIZE IS TOO SMALL - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n" " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
        exit(1);
    }
    /* Each dimension must fit the statically sized arrays (<= 12). */
    if (nx > 12 || ny > 12 || nz > 12) {
        printf(" SUBDOMAIN SIZE IS TOO LARGE - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n" " CURRENTLY%4d%4d%4d\n", nx, ny, nz);
        exit(1);
    }
    /* Interior index ranges (exclude the boundary layer). */
    ist = 1;
    iend = nx - 2;
    jst = 1;
    jend = ny - 2;
}
/*[]*/
/*
 * erhs: compute the right-hand-side forcing term frct[][][][] from the
 * exact-solution polynomial (coefficients ce[][]).
 *
 * Work is done inside one OpenMP parallel region:
 *   - zero frct, then fill rsd with the exact-solution polynomial;
 *   - three directional sweeps (xi, eta, zeta): each builds convective
 *     fluxes into flux[][][][], applies the flux differences to frct,
 *     adds viscous terms, and finally adds 4th-order artificial
 *     dissipation (weight dsspm, copied from the global dssp).
 * The `#pragma omp for nowait` loops are separated by explicit
 * `#pragma omp barrier`s where a sweep reads data the previous loop
 * wrote; the barrier/nowait placement is load-bearing.
 *
 * NOTE(review): the 1.40e+00 / 0.40e+00 constants presumably encode the
 * ratio of specific heats (gamma = 1.4, gamma - 1 = 0.4) -- confirm
 * against the benchmark documentation.
 */
static void erhs() {
    #pragma omp parallel
    {
        int i;
        int j;
        int k;
        int m;
        int iglob;   /* global i index (equals i: single-subdomain build) */
        int jglob;   /* global j index (equals j) */
        int L1;
        int L2;
        int ist1;
        int iend1;
        int jst1;
        int jend1;
        double dsspm;           /* local copy of the dissipation weight */
        double xi;
        double eta;
        double zeta;
        double q;               /* 0.5 * |momentum|^2 / density */
        double u21;             /* xi-velocity  rsd[...][1]/rsd[...][0] */
        double u31;             /* eta-velocity rsd[...][2]/rsd[...][0] */
        double u41;             /* zeta-velocity rsd[...][3]/rsd[...][0] */
        double tmp;             /* reciprocal of density */
        double u21i;
        double u31i;
        double u41i;
        double u51i;
        double u21j;
        double u31j;
        double u41j;
        double u51j;
        double u21k;
        double u31k;
        double u41k;
        double u51k;
        double u21im1;
        double u31im1;
        double u41im1;
        double u51im1;
        double u21jm1;
        double u31jm1;
        double u41jm1;
        double u51jm1;
        double u21km1;
        double u31km1;
        double u41km1;
        double u51km1;
        dsspm = dssp;
        /* Zero the forcing term. */
        #pragma omp for nowait
        for (i = 0; i < nx; i++) {
            for (j = 0; j < ny; j++) {
                for (k = 0; k < nz; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = 0.0;
                    }
                }
            }
        }
        /* Evaluate the exact-solution polynomial into rsd. */
        #pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            xi = ((double) iglob) / (nx0 - 1);
            for (j = 0; j < ny; j++) {
                jglob = j;
                eta = ((double) jglob) / (ny0 - 1);
                for (k = 0; k < nz; k++) {
                    zeta = ((double) k) / (nz - 1);
                    for (m = 0; m < 5; m++) {
                        rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
                    }
                }
            }
        }
        /* Both loops above must finish before the sweeps read rsd/frct. */
        #pragma omp barrier
        /* ---- xi-direction sweep: convective fluxes. ---- */
        L1 = 0;
        L2 = nx - 1;
        #pragma omp for nowait
        for (i = L1; i <= L2; i++) {
            for (j = jst; j <= jend; j++) {
                for (k = 1; k < nz - 1; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][1];
                    u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][2] = rsd[i][j][k][2] * u21;
                    flux[i][j][k][3] = rsd[i][j][k][3] * u21;
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21;
                }
            }
        }
        #pragma omp barrier
        /* xi-direction: flux differences, viscous terms, dissipation. */
        #pragma omp for nowait
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz - 2; k++) {
                for (i = ist; i <= iend; i++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
                    }
                }
                /* Viscous fluxes from velocity differences at i and i-1. */
                for (i = ist; i <= L2; i++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21i = tmp * rsd[i][j][k][1];
                    u31i = tmp * rsd[i][j][k][2];
                    u41i = tmp * rsd[i][j][k][3];
                    u51i = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i - 1][j][k][0];
                    u21im1 = tmp * rsd[i - 1][j][k][1];
                    u31im1 = tmp * rsd[i - 1][j][k][2];
                    u41im1 = tmp * rsd[i - 1][j][k][3];
                    u51im1 = tmp * rsd[i - 1][j][k][4];
                    flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
                    flux[i][j][k][2] = tx3 * (u31i - u31im1);
                    flux[i][j][k][3] = tx3 * (u41i - u41im1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * ((u21i * u21i + u31i * u31i + u41i * u41i) - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + (1.0 / 6.0) * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
                }
                for (i = ist; i <= iend; i++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]);
                }
                /* 4th-order dissipation: one-sided stencils at the boundaries. */
                for (m = 0; m < 5; m++) {
                    frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]);
                    frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]);
                }
                ist1 = 3;
                iend1 = nx - 4;
                for (i = ist1; i <= iend1; i++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]);
                    }
                }
                for (m = 0; m < 5; m++) {
                    frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]);
                    frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]);
                }
            }
        }
        #pragma omp barrier
        /* ---- eta-direction sweep: convective fluxes. ---- */
        L1 = 0;
        L2 = ny - 1;
        #pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (j = L1; j <= L2; j++) {
                for (k = 1; k <= nz - 2; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][2];
                    u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u31;
                    flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][3] = rsd[i][j][k][3] * u31;
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31;
                }
            }
        }
        #pragma omp barrier
        /* eta-direction: flux differences, viscous terms, dissipation. */
        #pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (k = 1; k <= nz - 2; k++) {
                for (j = jst; j <= jend; j++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
                    }
                }
                for (j = jst; j <= L2; j++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21j = tmp * rsd[i][j][k][1];
                    u31j = tmp * rsd[i][j][k][2];
                    u41j = tmp * rsd[i][j][k][3];
                    u51j = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i][j - 1][k][0];
                    u21jm1 = tmp * rsd[i][j - 1][k][1];
                    u31jm1 = tmp * rsd[i][j - 1][k][2];
                    u41jm1 = tmp * rsd[i][j - 1][k][3];
                    u51jm1 = tmp * rsd[i][j - 1][k][4];
                    flux[i][j][k][1] = ty3 * (u21j - u21jm1);
                    flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
                    flux[i][j][k][3] = ty3 * (u41j - u41jm1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * ((u21j * u21j + u31j * u31j + u41j * u41j) - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + (1.0 / 6.0) * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
                }
                for (j = jst; j <= jend; j++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]);
                }
                /* 4th-order dissipation: one-sided stencils at the boundaries. */
                for (m = 0; m < 5; m++) {
                    frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]);
                    frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]);
                }
                jst1 = 3;
                jend1 = ny - 4;
                for (j = jst1; j <= jend1; j++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]);
                    }
                }
                for (m = 0; m < 5; m++) {
                    frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]);
                    frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]);
                }
            }
        }
        #pragma omp barrier
        /* ---- zeta-direction sweep: fluxes, differences, viscous, dissipation
         * all within one worksharing loop (k is the innermost index, so each
         * (i,j) column is independent and needs no extra barrier). ---- */
        #pragma omp for nowait
        for (i = ist; i <= iend; i++) {
            for (j = jst; j <= jend; j++) {
                for (k = 0; k <= nz - 1; k++) {
                    flux[i][j][k][0] = rsd[i][j][k][3];
                    u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
                    q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0];
                    flux[i][j][k][1] = rsd[i][j][k][1] * u41;
                    flux[i][j][k][2] = rsd[i][j][k][2] * u41;
                    flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q);
                    flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41;
                }
                for (k = 1; k <= nz - 2; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
                    }
                }
                for (k = 1; k <= nz - 1; k++) {
                    tmp = 1.0 / rsd[i][j][k][0];
                    u21k = tmp * rsd[i][j][k][1];
                    u31k = tmp * rsd[i][j][k][2];
                    u41k = tmp * rsd[i][j][k][3];
                    u51k = tmp * rsd[i][j][k][4];
                    tmp = 1.0 / rsd[i][j][k - 1][0];
                    u21km1 = tmp * rsd[i][j][k - 1][1];
                    u31km1 = tmp * rsd[i][j][k - 1][2];
                    u41km1 = tmp * rsd[i][j][k - 1][3];
                    u51km1 = tmp * rsd[i][j][k - 1][4];
                    flux[i][j][k][1] = tz3 * (u21k - u21km1);
                    flux[i][j][k][2] = tz3 * (u31k - u31km1);
                    flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
                    flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * ((u21k * u21k + u31k * u31k + u41k * u41k) - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + (1.0 / 6.0) * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
                }
                for (k = 1; k <= nz - 2; k++) {
                    frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]);
                    frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]);
                    frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]);
                    frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]);
                    frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]);
                }
                /* 4th-order dissipation: one-sided stencils at the boundaries. */
                for (m = 0; m < 5; m++) {
                    frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]);
                    frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]);
                }
                for (k = 3; k <= nz - 4; k++) {
                    for (m = 0; m < 5; m++) {
                        frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]);
                    }
                }
                for (m = 0; m < 5; m++) {
                    frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]);
                    frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]);
                }
            }
        }
    }
}
/*[]*/
/*
 * error: compute the per-component L2 (RMS) norm of the difference between
 * the computed solution u and the analytic solution from exact(), storing
 * the five norms in the global errnm[5].
 *
 * The sweep covers the interior points i in [ist,iend], j in [jst,jend],
 * k in [1, nz-2]; the squared differences are normalised by
 * (nx0-2)*(ny0-2)*(nz0-2) before taking the square root.
 */
static void error() {
    int i, j, k, m;
    double diff;           /* pointwise difference for one component */
    double usol[5];        /* exact solution at the current grid point */

    /* Reset the accumulators. */
    for (m = 0; m < 5; m++) {
        errnm[m] = 0.0;
    }

    /* Accumulate squared pointwise errors over the interior domain.
       (In this serial variant the global index equals the local index,
       so exact() is called with i and j directly.) */
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz - 2; k++) {
                exact(i, j, k, usol);
                for (m = 0; m < 5; m++) {
                    diff = usol[m] - u[i][j][k][m];
                    errnm[m] = errnm[m] + diff * diff;
                }
            }
        }
    }

    /* Normalise by the interior point count and take the root. */
    for (m = 0; m < 5; m++) {
        errnm[m] = sqrt(errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)));
    }
}
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*[1, 2, 3, 4, 5, 6, 49, 50, 51, 52, 53, 54]*/
/*
 * exact: evaluate the analytic reference solution of the LU benchmark at
 * grid point (i, j, k), writing the five flow components into u000ijk.
 * Each component m is a quartic polynomial in the normalized coordinates
 * with coefficients taken from the global table ce[5][13].
 */
static void exact(int i, int j , int k , double u000ijk[5]) {
    int m;
    /* Normalized coordinates in [0,1].
       NOTE(review): xi and eta are scaled by the global extents nx0/ny0
       while zeta uses nz — this matches the original code; confirm it is
       the intended (serial) scaling. */
    double xi = ((double) i) / (nx0 - 1);
    double eta = ((double) j) / (ny0 - 1);
    double zeta = ((double) k) / (nz - 1);

    for (m = 0; m < 5; m++) {
        u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
    }
}
/*[13, 14, 25, 41]*/
/*[13, 14, 25, 41]*/
/*
 * jacld: assemble, for the fixed z-plane `k`, the 5x5 block Jacobians used
 * by the lower-triangular sweep of the SSOR solver:
 *   d[i][j][*][*] - diagonal block, built from u at (i, j, k)
 *   a[i][j][*][*] - block coupling to the k-1 neighbour, u[i][j][k-1]
 *   b[i][j][*][*] - block coupling to the j-1 neighbour, u[i][j-1][k]
 *   c[i][j][*][*] - block coupling to the i-1 neighbour, u[i-1][j][k]
 * (IMOP phase annotation for every statement in this body: [13, 14, 25, 41];
 * the per-line markers were consolidated into this note.)
 */
static void jacld(int k) {
int i;
int j;
double r43;
double c1345;
double c34;
double tmp1;
double tmp2;
double tmp3;
r43 = (4.0 / 3.0);
c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
c34 = 1.00e-01 * 1.00e+00;
/* Work is shared over i; `nowait` because the caller synchronises.
   NOTE(review): assumes an enclosing `omp parallel` region — confirm. */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
/* tmp1/tmp2/tmp3 = 1/rho, 1/rho^2, 1/rho^3 at the point the next
   block of assignments reads; they are re-derived before each block. */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* --- diagonal block d, from u[i][j][k] --- */
d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
/* --- sub-block a, from the k-1 neighbour u[i][j][k-1] --- */
tmp1 = 1.0 / u[i][j][k - 1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = -dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = -dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]);
a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1);
a[i][j][1][4] = 0.0;
a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]);
a[i][j][2][1] = 0.0;
a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1);
a[i][j][2][4] = 0.0;
a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]);
a[i][j][3][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * tmp1));
a[i][j][3][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * tmp1));
a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
a[i][j][3][4] = -dt * tz2 * 0.40e+00;
a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]);
a[i][j][4][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1];
a[i][j][4][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2];
a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3];
a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
/* --- sub-block b, from the j-1 neighbour u[i][j-1][k] --- */
tmp1 = 1.0 / u[i][j - 1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = -dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = -dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]);
b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1);
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]);
b[i][j][2][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * tmp1));
b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
b[i][j][2][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][3] * tmp1));
b[i][j][2][4] = -dt * ty2 * 0.40e+00;
b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]);
b[i][j][3][1] = 0.0;
b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1);
b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j - 1][k][1]) * (u[i][j - 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j - 1][k][2]) * (u[i][j - 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j - 1][k][3]) * (u[i][j - 1][k][3]))) - c1345 * tmp2 * u[i][j - 1][k][4]);
b[i][j][4][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1];
b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2];
b[i][j][4][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3];
b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
/* --- sub-block c, from the i-1 neighbour u[i-1][j][k] --- */
tmp1 = 1.0 / u[i - 1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = -dt * tx1 * dx1;
c[i][j][0][1] = -dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]);
c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
c[i][j][1][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * tmp1));
c[i][j][1][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * tmp1));
c[i][j][1][4] = -dt * tx2 * 0.40e+00;
c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);
c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);
c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);
c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);
c[i][j][3][2] = 0.0;
c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i - 1][j][k][1]) * (u[i - 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][2]) * (u[i - 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i - 1][j][k][3]) * (u[i - 1][j][k][3]))) - c1345 * tmp2 * u[i - 1][j][k][4]);
c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];
c[i][j][4][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];
c[i][j][4][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];
c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
}
}
}
/*[13, 14, 26, 42]*/
/*[13, 14, 26, 42]*/
/*
 * jacu: assemble, for the fixed z-plane `k`, the 5x5 block Jacobians used
 * by the upper-triangular sweep of the SSOR solver (mirror of jacld, but
 * coupling to the +1 neighbours and sweeping i and j in reverse order):
 *   d[i][j][*][*] - diagonal block, built from u at (i, j, k)
 *   a[i][j][*][*] - block coupling to the i+1 neighbour, u[i+1][j][k]
 *   b[i][j][*][*] - block coupling to the j+1 neighbour, u[i][j+1][k]
 *   c[i][j][*][*] - block coupling to the k+1 neighbour, u[i][j][k+1]
 * (IMOP phase annotation for every statement in this body: [13, 14, 26, 42];
 * the per-line markers were consolidated into this note.)
 */
static void jacu(int k) {
int i;
int j;
double r43;
double c1345;
double c34;
double tmp1;
double tmp2;
double tmp3;
r43 = (4.0 / 3.0);
c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
c34 = 1.00e-01 * 1.00e+00;
/* Work is shared over i; `nowait` because the caller synchronises.
   NOTE(review): assumes an enclosing `omp parallel` region — confirm. */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
/* tmp1/tmp2/tmp3 = 1/rho, 1/rho^2, 1/rho^3 at the point the next
   block of assignments reads; they are re-derived before each block. */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* --- diagonal block d, from u[i][j][k] (identical form to jacld) --- */
d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k][1]) * (u[i][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k][2]) * (u[i][j][k][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k][3]) * (u[i][j][k][3]))) - c1345 * tmp2 * u[i][j][k][4]));
d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
/* --- sub-block a, from the i+1 neighbour u[i+1][j][k] --- */
tmp1 = 1.0 / u[i + 1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = -dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]);
a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * tmp1));
a[i][j][1][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * tmp1));
a[i][j][1][4] = dt * tx2 * 0.40e+00;
a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]);
a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1);
a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]);
a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1);
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (((u[i + 1][j][k][1]) * (u[i + 1][j][k][1]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][2]) * (u[i + 1][j][k][2]))) - (c34 - c1345) * tmp3 * (((u[i + 1][j][k][3]) * (u[i + 1][j][k][3]))) - c1345 * tmp2 * u[i + 1][j][k][4]);
a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1];
a[i][j][4][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2];
a[i][j][4][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3];
a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
/* --- sub-block b, from the j+1 neighbour u[i][j+1][k] --- */
tmp1 = 1.0 / u[i][j + 1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = -dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]);
b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1);
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]);
b[i][j][2][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * tmp1));
b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][3] * tmp1));
b[i][j][2][4] = dt * ty2 * 0.40e+00;
b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]);
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1);
b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (((u[i][j + 1][k][1]) * (u[i][j + 1][k][1]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j + 1][k][2]) * (u[i][j + 1][k][2]))) - (c34 - c1345) * tmp3 * (((u[i][j + 1][k][3]) * (u[i][j + 1][k][3]))) - c1345 * tmp2 * u[i][j + 1][k][4]);
b[i][j][4][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1];
b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2];
b[i][j][4][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3];
b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5;
/* --- sub-block c, from the k+1 neighbour u[i][j][k+1] --- */
tmp1 = 1.0 / u[i][j][k + 1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = -dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]);
c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1);
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]);
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1);
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]);
c[i][j][3][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * tmp1));
c[i][j][3][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * tmp1));
c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * 0.40e+00;
c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (((u[i][j][k + 1][1]) * (u[i][j][k + 1][1]))) - (c34 - c1345) * tmp3 * (((u[i][j][k + 1][2]) * (u[i][j][k + 1][2]))) - (r43 * c34 - c1345) * tmp3 * (((u[i][j][k + 1][3]) * (u[i][j][k + 1][3]))) - c1345 * tmp2 * u[i][j][k + 1][4]);
c[i][j][4][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1];
c[i][j][4][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2];
c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3];
c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
}
}
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
static void l2norm(int nx0, int ny0 , int nz0 , int ist , int iend , int jst , int jend , double v[12][12 / 2 * 2 + 1][12 / 2 * 2 + 1][5] , double sum[5]) {
/*[]*/
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int m;
/*[]*/
double sum0 = 0.0;
/*[]*/
double sum1 = 0.0;
/*[]*/
double sum2 = 0.0;
/*[]*/
double sum3 = 0.0;
/*[]*/
double sum4 = 0.0;
/*[]*/
#pragma omp single nowait
{
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
sum[m] = 0.0;
}
}
/*[]*/
#pragma omp for nowait
/*[]*/
/*[]*/
/*[]*/
for (i = ist; i <= iend; i++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (j = jst; j <= jend; j++) {
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (k = 1; k <= nz0 - 2; k++) {
/*[]*/
/*[]*/
sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
/*[]*/
sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
/*[]*/
sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
/*[]*/
sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
/*[]*/
sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
}
}
}
/*[]*/
// #pragma omp dummyFlush CRITICAL_START
/*[]*/
#pragma omp critical
{
/*[]*/
/*[]*/
sum[0] += sum0;
/*[]*/
sum[1] += sum1;
/*[]*/
sum[2] += sum2;
/*[]*/
sum[3] += sum3;
/*[]*/
sum[4] += sum4;
}
/*[]*/
// #pragma omp dummyFlush CRITICAL_END
/*[]*/
#pragma omp single nowait
{
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (m = 0; m < 5; m++) {
/*[]*/
/*[]*/
double _imopVarPre154;
/*[]*/
double _imopVarPre155;
/*[]*/
_imopVarPre154 = sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[]*/
/*[]*/
sum[m] = _imopVarPre155;
}
}
}
/*[]*/
/* Pressure-like quantity at grid point (i,j,k):
   0.4 * (E - 0.5 * ((rho*u)^2 + (rho*v)^2 + (rho*w)^2) / rho),
   where u[..][0..4] = (rho, rho*u, rho*v, rho*w, E). */
static double pintgr_phi_at(int i, int j, int k) {
    return 0.40e+00 * (u[i][j][k][4]
                       - 0.50 * (u[i][j][k][1] * u[i][j][k][1]
                               + u[i][j][k][2] * u[i][j][k][2]
                               + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]);
}

static void pintgr() {
    /*
     * Computes the verification surface integral 'frc' (global) as the
     * average of three trapezoidal-rule integrals of the pressure field
     * over the k = ki1/ki2, j = jbeg/jfin and i = ibeg/ifin planes of the
     * sub-domain [ii1..ii2] x [ji1..ji2] x [ki1..ki2].
     */
    int i;
    int j;
    int k;
    int ibeg;
    int ifin;
    int ifin1;
    int jbeg;
    int jfin;
    int jfin1;
    int iglob1;
    int iglob2;
    int jglob1;
    int jglob2;
    double phi1[12 + 2][12 + 2];
    double phi2[12 + 2][12 + 2];
    double frc1;
    double frc2;
    double frc3;
    /* Clip the i-extent of the integration region against [ii1, ii2].
       (iglob* retain the distributed-memory form of the original code;
       in this serial version the local and global indices coincide.) */
    ibeg = nx;
    ifin = 0;
    iglob1 = -1;
    iglob2 = nx - 1;
    if (iglob1 >= ii1 && iglob2 < ii2 + nx) {
        ibeg = 0;
    }
    if (iglob1 >= ii1 - nx && iglob2 <= ii2) {
        ifin = nx;
    }
    if (ii1 >= iglob1 && ii1 <= iglob2) {
        ibeg = ii1;
    }
    if (ii2 >= iglob1 && ii2 <= iglob2) {
        ifin = ii2;
    }
    /* Same clipping for the j-extent against [ji1, ji2]. */
    jbeg = ny;
    jfin = -1;
    jglob1 = 0;
    jglob2 = ny - 1;
    if (jglob1 >= ji1 && jglob2 < ji2 + ny) {
        jbeg = 0;
    }
    if (jglob1 > ji1 - ny && jglob2 <= ji2) {
        jfin = ny;
    }
    if (ji1 >= jglob1 && ji1 <= jglob2) {
        jbeg = ji1;
    }
    if (ji2 >= jglob1 && ji2 <= jglob2) {
        jfin = ji2;
    }
    /* Trapezoid rule needs one fewer cell than points at the upper edge. */
    ifin1 = ifin;
    jfin1 = jfin;
    if (ifin1 == ii2) {
        ifin1 = ifin - 1;
    }
    if (jfin1 == ji2) {
        jfin1 = jfin - 1;
    }
    /* ---- integral over the two constant-k planes (k = ki1, k = ki2) ---- */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    for (i = ibeg; i <= ifin; i++) {
        for (j = jbeg; j <= jfin; j++) {
            phi1[i][j] = pintgr_phi_at(i, j, ki1);
            phi2[i][j] = pintgr_phi_at(i, j, ki2);
        }
    }
    frc1 = 0.0;
    for (i = ibeg; i <= ifin1; i++) {
        for (j = jbeg; j <= jfin1; j++) {
            frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]);
        }
    }
    frc1 = dxi * deta * frc1;
    /* ---- integral over the two constant-j planes (j = jbeg, j = jfin) ---- */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    if (jbeg == ji1) {
        for (i = ibeg; i <= ifin; i++) {
            for (k = ki1; k <= ki2; k++) {
                phi1[i][k] = pintgr_phi_at(i, jbeg, k);
            }
        }
    }
    if (jfin == ji2) {
        for (i = ibeg; i <= ifin; i++) {
            for (k = ki1; k <= ki2; k++) {
                phi2[i][k] = pintgr_phi_at(i, jfin, k);
            }
        }
    }
    frc2 = 0.0;
    for (i = ibeg; i <= ifin1; i++) {
        for (k = ki1; k <= ki2 - 1; k++) {
            frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]);
        }
    }
    frc2 = dxi * dzeta * frc2;
    /* ---- integral over the two constant-i planes (i = ibeg, i = ifin) ---- */
    for (i = 0; i <= 12 + 1; i++) {
        for (k = 0; k <= 12 + 1; k++) {
            phi1[i][k] = 0.0;
            phi2[i][k] = 0.0;
        }
    }
    if (ibeg == ii1) {
        for (j = jbeg; j <= jfin; j++) {
            for (k = ki1; k <= ki2; k++) {
                phi1[j][k] = pintgr_phi_at(ibeg, j, k);
            }
        }
    }
    if (ifin == ii2) {
        for (j = jbeg; j <= jfin; j++) {
            for (k = ki1; k <= ki2; k++) {
                phi2[j][k] = pintgr_phi_at(ifin, j, k);
            }
        }
    }
    frc3 = 0.0;
    for (j = jbeg; j <= jfin1; j++) {
        for (k = ki1; k <= ki2 - 1; k++) {
            frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]);
        }
    }
    frc3 = deta * dzeta * frc3;
    /* Average of the three surface integrals (each face pair counted once). */
    frc = 0.25 * (frc1 + frc2 + frc3);
}
/*[]*/
/* Consume characters up to and including the next newline.
   Unlike the original inline loops, this stops at EOF as well: the old
   `while (c != '\n')` form spun forever on a truncated inputlu.data. */
static void skip_rest_of_line(FILE *fp) {
    int c = fgetc(fp);
    while (c != '\n' && c != EOF) {
        c = fgetc(fp);
    }
}

static void read_input() {
    /*
     * Reads the benchmark control parameters (ipr, inorm, itmax, dt, omega,
     * tolrsd[0..4], nx0/ny0/nz0) from "inputlu.data" if it exists, otherwise
     * falls back to the built-in class defaults.  Validates the grid size
     * and prints the run header.  Calls exit(1) on an invalid problem size.
     *
     * NOTE(review): fscanf return values are not checked, matching the
     * original benchmark's trust in a well-formed input file.
     */
    FILE *fp;
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - LU Benchmark\n\n");
    fp = fopen("inputlu.data", "r");
    if (fp != ((void *) 0)) {
        printf(" Reading from input file inputlu.data\n");
        /* Each parameter group in the file is preceded by comment lines and
           followed by the remainder of its own data line; skip accordingly. */
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        fscanf(fp, "%d%d", &ipr, &inorm);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        fscanf(fp, "%d", &itmax);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        fscanf(fp, "%lf", &dt);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        /* (the original instrumented source had this operand garbled as
           `ω` — it is the address of the global SSOR relaxation factor) */
        fscanf(fp, "%lf", &omega);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        fscanf(fp, "%lf%lf%lf%lf%lf", &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        skip_rest_of_line(fp);
        fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
        skip_rest_of_line(fp);
        fclose(fp);
    } else {
        /* Class-W defaults used when no input file is present. */
        ipr = 1;
        inorm = 50;
        itmax = 50;
        dt = 0.5;
        omega = 1.2;
        tolrsd[0] = 1.0e-8;
        tolrsd[1] = 1.0e-8;
        tolrsd[2] = 1.0e-8;
        tolrsd[3] = 1.0e-8;
        tolrsd[4] = 1.0e-8;
        nx0 = 12;
        ny0 = 12;
        nz0 = 12;
    }
    /* Grid-size validation.  NOTE(review): the `< 4` bound and the
       "AT LEAST EQUAL TO 5" message disagree in the original benchmark;
       the behavior is preserved here. */
    if (nx0 < 4 || ny0 < 4 || nz0 < 4) {
        printf(" PROBLEM SIZE IS TOO SMALL - \n" " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
        exit(1);
    }
    /* 12 is the compiled-in array extent (ISIZ1/2/3 for this class). */
    if (nx0 > 12 || ny0 > 12 || nz0 > 12) {
        printf(" PROBLEM SIZE IS TOO LARGE - \n" " NX, NY AND NZ SHOULD BE EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
        exit(1);
    }
    printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
    printf(" Iterations: %3d\n", itmax);
}
/*[]*/
static void rhs() {
    /*
     * Computes the right-hand side (steady-state residual) rsd of the
     * discretized Navier-Stokes equations: rsd = -frct plus the convective
     * (flux-difference), viscous and 4th-order artificial-dissipation terms
     * in each of the xi (i), eta (j) and zeta (k) directions in turn.
     *
     * Runs inside an enclosing OpenMP parallel region: the orphaned
     * `omp for nowait` directives partition the outer loop of each phase.
     * The `nowait` clauses are safe because each thread's eta/zeta-phase
     * iterations touch only the i-slices it wrote in the preceding phase.
     */
    int i;
    int j;
    int k;
    int m;
    int L1;
    int L2;
    int ist1;
    int iend1;
    int jst1;
    int jend1;
    double q;        /* dynamic-pressure-like term 0.5*|rho*v|^2/rho   */
    double u21;      /* xi-velocity  u = (rho*u)/rho                  */
    double u31;      /* eta-velocity v                                */
    double u41;      /* zeta-velocity w                               */
    double tmp;      /* 1/rho at the current point                    */
    /* primitive variables at (i,j,k) and at the neighbor one step back
       in the sweep direction (suffix m1), used by the viscous fluxes */
    double u21i;
    double u31i;
    double u41i;
    double u51i;
    double u21j;
    double u31j;
    double u41j;
    double u51j;
    double u21k;
    double u31k;
    double u41k;
    double u51k;
    double u21im1;
    double u31im1;
    double u41im1;
    double u51im1;
    double u21jm1;
    double u31jm1;
    double u41jm1;
    double u51jm1;
    double u21km1;
    double u31km1;
    double u41km1;
    double u51km1;
    /* -------- initialize the residual with the negated forcing term ------- */
#pragma omp for nowait
    for (i = 0; i <= nx - 1; i++) {
        for (j = 0; j <= ny - 1; j++) {
            for (k = 0; k <= nz - 1; k++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = -frct[i][j][k][m];
                }
            }
        }
    }
    /* ------------------- xi-direction convective fluxes ------------------- */
    L1 = 0;
    L2 = nx - 1;
#pragma omp for nowait
    for (i = L1; i <= L2; i++) {
        for (j = jst; j <= jend; j++) {
            for (k = 1; k <= nz - 2; k++) {
                flux[i][j][k][0] = u[i][j][k][1];
                u21 = u[i][j][k][1] / u[i][j][k][0];
                q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];
                /* 0.40e+00 = gamma-1; the pressure contribution */
                flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q);
                flux[i][j][k][2] = u[i][j][k][2] * u21;
                flux[i][j][k][3] = u[i][j][k][3] * u21;
                flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21;
            }
        }
    }
    /* ---- xi-direction: flux differences, viscous terms, dissipation ---- */
#pragma omp for nowait
    for (j = jst; j <= jend; j++) {
        for (k = 1; k <= nz - 2; k++) {
            /* central flux difference (2nd-order convection) */
            for (i = ist; i <= iend; i++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]);
                }
            }
            /* viscous fluxes on cell faces (flux[] is reused as scratch) */
            L2 = nx - 1;
            for (i = ist; i <= L2; i++) {
                tmp = 1.0 / u[i][j][k][0];
                u21i = tmp * u[i][j][k][1];
                u31i = tmp * u[i][j][k][2];
                u41i = tmp * u[i][j][k][3];
                u51i = tmp * u[i][j][k][4];
                tmp = 1.0 / u[i - 1][j][k][0];
                u21im1 = tmp * u[i - 1][j][k][1];
                u31im1 = tmp * u[i - 1][j][k][2];
                u41im1 = tmp * u[i - 1][j][k][3];
                u51im1 = tmp * u[i - 1][j][k][4];
                flux[i][j][k][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
                flux[i][j][k][2] = tx3 * (u31i - u31im1);
                flux[i][j][k][3] = tx3 * (u41i - u41im1);
                flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
            }
            /* add viscous flux differences plus 2nd-difference damping
               (1.00e-01 * 1.00e+00 = C3*C4 viscosity constants) */
            for (i = ist; i <= iend; i++) {
                rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]);
                rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]);
                rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]);
                rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]);
                rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]);
            }
            /* 4th-order artificial dissipation: one-sided stencils at the
               near boundary (i = 1, 2) ... */
            for (m = 0; m < 5; m++) {
                rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]);
                rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (-4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]);
            }
            /* ... full 5-point stencil in the interior ... */
            ist1 = 3;
            iend1 = nx - 4;
            for (i = ist1; i <= iend1; i++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]);
                }
            }
            /* ... and one-sided stencils at the far boundary (i = nx-3, nx-2) */
            for (m = 0; m < 5; m++) {
                rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]);
                rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]);
            }
        }
    }
    /* ------------------ eta-direction convective fluxes ------------------ */
    L1 = 0;
    L2 = ny - 1;
#pragma omp for nowait
    for (i = ist; i <= iend; i++) {
        for (j = L1; j <= L2; j++) {
            for (k = 1; k <= nz - 2; k++) {
                flux[i][j][k][0] = u[i][j][k][2];
                u31 = u[i][j][k][2] / u[i][j][k][0];
                q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];
                flux[i][j][k][1] = u[i][j][k][1] * u31;
                flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q);
                flux[i][j][k][3] = u[i][j][k][3] * u31;
                flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31;
            }
        }
    }
    /* ---- eta-direction: flux differences, viscous terms, dissipation ---- */
#pragma omp for nowait
    for (i = ist; i <= iend; i++) {
        for (k = 1; k <= nz - 2; k++) {
            for (j = jst; j <= jend; j++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]);
                }
            }
            L2 = ny - 1;
            for (j = jst; j <= L2; j++) {
                tmp = 1.0 / u[i][j][k][0];
                u21j = tmp * u[i][j][k][1];
                u31j = tmp * u[i][j][k][2];
                u41j = tmp * u[i][j][k][3];
                u51j = tmp * u[i][j][k][4];
                tmp = 1.0 / u[i][j - 1][k][0];
                u21jm1 = tmp * u[i][j - 1][k][1];
                u31jm1 = tmp * u[i][j - 1][k][2];
                u41jm1 = tmp * u[i][j - 1][k][3];
                u51jm1 = tmp * u[i][j - 1][k][4];
                flux[i][j][k][1] = ty3 * (u21j - u21jm1);
                /* the 4/3 factor moves to the normal (eta) component here */
                flux[i][j][k][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
                flux[i][j][k][3] = ty3 * (u41j - u41jm1);
                flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
            }
            for (j = jst; j <= jend; j++) {
                rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]);
                rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]);
                rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]);
                rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]);
                rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]);
            }
            /* eta-direction 4th-order dissipation (same stencil layout as xi) */
            for (m = 0; m < 5; m++) {
                rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]);
                rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (-4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]);
            }
            jst1 = 3;
            jend1 = ny - 4;
            for (j = jst1; j <= jend1; j++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]);
                }
            }
            for (m = 0; m < 5; m++) {
                rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]);
                rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]);
            }
        }
    }
    /* ---- zeta-direction: fluxes, viscous terms and dissipation, all in
       one worksharing loop since the k-sweep is innermost ---- */
#pragma omp for nowait
    for (i = ist; i <= iend; i++) {
        for (j = jst; j <= jend; j++) {
            for (k = 0; k <= nz - 1; k++) {
                flux[i][j][k][0] = u[i][j][k][3];
                u41 = u[i][j][k][3] / u[i][j][k][0];
                q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0];
                flux[i][j][k][1] = u[i][j][k][1] * u41;
                flux[i][j][k][2] = u[i][j][k][2] * u41;
                flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q);
                flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41;
            }
            for (k = 1; k <= nz - 2; k++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]);
                }
            }
            for (k = 1; k <= nz - 1; k++) {
                tmp = 1.0 / u[i][j][k][0];
                u21k = tmp * u[i][j][k][1];
                u31k = tmp * u[i][j][k][2];
                u41k = tmp * u[i][j][k][3];
                u51k = tmp * u[i][j][k][4];
                tmp = 1.0 / u[i][j][k - 1][0];
                u21km1 = tmp * u[i][j][k - 1][1];
                u31km1 = tmp * u[i][j][k - 1][2];
                u41km1 = tmp * u[i][j][k - 1][3];
                u51km1 = tmp * u[i][j][k - 1][4];
                flux[i][j][k][1] = tz3 * (u21k - u21km1);
                flux[i][j][k][2] = tz3 * (u31k - u31km1);
                flux[i][j][k][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
                flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
            }
            for (k = 1; k <= nz - 2; k++) {
                rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]);
                rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]);
                rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]);
                rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]);
                rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]);
            }
            /* zeta-direction 4th-order dissipation */
            for (m = 0; m < 5; m++) {
                rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]);
                rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (-4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]);
            }
            for (k = 3; k <= nz - 4; k++) {
                for (m = 0; m < 5; m++) {
                    rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]);
                }
            }
            for (m = 0; m < 5; m++) {
                rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]);
                rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]);
            }
        }
    }
}
/*[]*/
static void setbv() {
    /*
     * Sets the boundary values of the dependent variable array u on all
     * six faces of the grid from the exact analytic solution (exact()).
     * Each face is filled by its own worksharing loop; the barriers keep
     * the original phase separation between faces.
     *
     * NOTE(review): the eta/xi far faces mix local (ny-1, nx-1) indices
     * with global (ny0-1, nx0-1) coordinates, exactly as in the original;
     * in this serial version the two coincide.
     */
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int iglob;
        int jglob;
        /* zeta = 0 and zeta = 1 faces */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            for (j = 0; j < ny; j++) {
                jglob = j;
                exact(iglob, jglob, 0, &u[i][j][0][0]);
                exact(iglob, jglob, nz - 1, &u[i][j][nz - 1][0]);
            }
        }
#pragma omp barrier
        /* eta = 0 face */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            for (k = 0; k < nz; k++) {
                exact(iglob, 0, k, &u[i][0][k][0]);
            }
        }
#pragma omp barrier
        /* eta = 1 face */
#pragma omp for nowait
        for (i = 0; i < nx; i++) {
            iglob = i;
            for (k = 0; k < nz; k++) {
                exact(iglob, ny0 - 1, k, &u[i][ny - 1][k][0]);
            }
        }
#pragma omp barrier
        /* xi = 0 face */
#pragma omp for nowait
        for (j = 0; j < ny; j++) {
            jglob = j;
            for (k = 0; k < nz; k++) {
                exact(0, jglob, k, &u[0][j][k][0]);
            }
        }
#pragma omp barrier
        /* xi = 1 face */
#pragma omp for nowait
        for (j = 0; j < ny; j++) {
            jglob = j;
            for (k = 0; k < nz; k++) {
                exact(nx0 - 1, jglob, k, &u[nx - 1][j][k][0]);
            }
        }
    }
}
/*[]*/
static void setcoeff() {
    /*
     * Initializes the global discretization coefficients: grid spacings,
     * the convective/viscous scaling factors t{x,y,z}{1,2,3}, the
     * integration sub-domain bounds, the per-direction damping constants
     * d{x,y,z}{1..5}, the artificial-dissipation coefficient dssp, and
     * the exact-solution coefficient table ce.
     */
    /* grid spacings (global coordinates span [0,1]) */
    dxi = 1.0 / (nx0 - 1);
    deta = 1.0 / (ny0 - 1);
    dzeta = 1.0 / (nz0 - 1);
    /* 2nd-difference, central-difference and 1st-difference scalings */
    tx1 = 1.0 / (dxi * dxi);
    tx2 = 1.0 / (2.0 * dxi);
    tx3 = 1.0 / dxi;
    ty1 = 1.0 / (deta * deta);
    ty2 = 1.0 / (2.0 * deta);
    ty3 = 1.0 / deta;
    tz1 = 1.0 / (dzeta * dzeta);
    tz2 = 1.0 / (2.0 * dzeta);
    tz3 = 1.0 / dzeta;
    /* bounds of the pintgr() integration sub-domain */
    ii1 = 1;
    ii2 = nx0 - 2;
    ji1 = 1;
    ji2 = ny0 - 3;
    ki1 = 2;
    ki2 = nz0 - 2;
    /* diffusion coefficients, uniform per direction */
    dx1 = 0.75;
    dx2 = dx1;
    dx3 = dx1;
    dx4 = dx1;
    dx5 = dx1;
    dy1 = 0.75;
    dy2 = dy1;
    dy3 = dy1;
    dy4 = dy1;
    dy5 = dy1;
    dz1 = 1.00;
    dz2 = dz1;
    dz3 = dz1;
    dz4 = dz1;
    dz5 = dz1;
    /* dssp = max(dx1, dy1, dz1) / 4 */
    double dmax = dx1;
    if (dy1 > dmax) {
        dmax = dy1;
    }
    if (dz1 > dmax) {
        dmax = dz1;
    }
    dssp = dmax / 4.0;
    /* coefficients of the exact solution, one row per equation */
    static const double ce_init[5][13] = {
        {2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 5.0e-01, 2.0e-02, 1.0e-02, 3.0e-02, 5.0e-01, 4.0e-01, 3.0e-01},
        {1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0e-02, 3.0e-02, 2.0e-02, 4.0e-01, 3.0e-01, 5.0e-01},
        {2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 4.0e-02, 3.0e-02, 5.0e-02, 3.0e-01, 5.0e-01, 4.0e-01},
        {2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 3.0e-02, 5.0e-02, 4.0e-02, 2.0e-01, 1.0e-01, 3.0e-01},
        {5.0, 4.0, 3.0, 2.0, 1.0e-01, 4.0e-01, 3.0e-01, 5.0e-02, 4.0e-02, 3.0e-02, 1.0e-01, 3.0e-01, 2.0e-01}
    };
    int m;
    int n;
    for (m = 0; m < 5; m++) {
        for (n = 0; n < 13; n++) {
            ce[m][n] = ce_init[m][n];
        }
    }
}
/*[]*/
static void setiv() {
    /*
     * Sets the initial values of the dependent variables at all interior
     * grid points by transfinite interpolation of the exact solution on
     * the six boundary faces:
     *   u = pxi + peta + pzeta - pxi*peta - peta*pzeta - pzeta*pxi
     *       + pxi*peta*pzeta
     * where p{xi,eta,zeta} linearly blend the opposing face values.
     * Boundary points themselves are skipped (they are set by setbv()).
     */
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int m;
        double xi;
        double eta;
        double zeta;
        double pxi;
        double peta;
        double pzeta;
        /* exact-solution values on the six faces at the current line */
        double ue_1jk[5];
        double ue_nx0jk[5];
        double ue_i1k[5];
        double ue_iny0k[5];
        double ue_ij1[5];
        double ue_ijnz[5];
#pragma omp for nowait
        for (j = 0; j < ny; j++) {
            for (k = 1; k < nz - 1; k++) {
                zeta = ((double) k) / (nz - 1);
                if (j != 0 && j != ny0 - 1) {
                    eta = ((double) j) / (ny0 - 1);
                    for (i = 0; i < nx; i++) {
                        if (i != 0 && i != nx0 - 1) {
                            xi = ((double) i) / (nx0 - 1);
                            exact(0, j, k, ue_1jk);
                            exact(nx0 - 1, j, k, ue_nx0jk);
                            exact(i, 0, k, ue_i1k);
                            exact(i, ny0 - 1, k, ue_iny0k);
                            exact(i, j, 0, ue_ij1);
                            exact(i, j, nz - 1, ue_ijnz);
                            for (m = 0; m < 5; m++) {
                                pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
                                peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
                                pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
                                u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;
                            }
                        }
                    }
                }
            }
        }
    }
}
/*[]*/
static void ssor() {
/*[]*/
/*[]*/
int i;
/*[]*/
int j;
/*[]*/
int k;
/*[]*/
int m;
/*[]*/
int istep;
/*[]*/
double tmp;
/*[]*/
double delunm[5];
/*[]*/
double tv[12][12][5];
/*[]*/
tmp = 1.0 / (omega * (2.0 - omega));
/*[55]*/
#pragma omp parallel private(i, j, k, m)
{
/*[55]*/
/*[55]*/
#pragma omp for nowait
/*[55]*/
/*[55]*/
/*[55]*/
for (i = 0; i < 12; i++) {
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
for (j = 0; j < 12; j++) {
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
for (k = 0; k < 5; k++) {
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
/*[55]*/
for (m = 0; m < 5; m++) {
/*[55]*/
/*[55]*/
a[i][j][k][m] = 0.0;
/*[55]*/
b[i][j][k][m] = 0.0;
/*[55]*/
c[i][j][k][m] = 0.0;
/*[55]*/
d[i][j][k][m] = 0.0;
}
}
}
}
}
/*[56]*/
#pragma omp parallel
{
/*[56]*/
/*[56]*/
int i_imopVarPre84;
/*[56]*/
int j_imopVarPre85;
/*[56]*/
int k_imopVarPre86;
/*[56]*/
int m_imopVarPre87;
/*[56]*/
int L1;
/*[56]*/
int L2;
/*[56]*/
int ist1;
/*[56]*/
int iend1;
/*[56]*/
int jst1;
/*[56]*/
int jend1;
/*[56]*/
double q;
/*[56]*/
double u21;
/*[56]*/
double u31;
/*[56]*/
double u41;
/*[56]*/
double tmp_imopVarPre88;
/*[56]*/
double u21i;
/*[56]*/
double u31i;
/*[56]*/
double u41i;
/*[56]*/
double u51i;
/*[56]*/
double u21j;
/*[56]*/
double u31j;
/*[56]*/
double u41j;
/*[56]*/
double u51j;
/*[56]*/
double u21k;
/*[56]*/
double u31k;
/*[56]*/
double u41k;
/*[56]*/
double u51k;
/*[56]*/
double u21im1;
/*[56]*/
double u31im1;
/*[56]*/
double u41im1;
/*[56]*/
double u51im1;
/*[56]*/
double u21jm1;
/*[56]*/
double u31jm1;
/*[56]*/
double u41jm1;
/*[56]*/
double u51jm1;
/*[56]*/
double u21km1;
/*[56]*/
double u31km1;
/*[56]*/
double u41km1;
/*[56]*/
double u51km1;
/*[56]*/
#pragma omp for nowait
/*[56]*/
/*[56]*/
/*[56]*/
for (i_imopVarPre84 = 0; i_imopVarPre84 <= nx - 1; i_imopVarPre84++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (j_imopVarPre85 = 0; j_imopVarPre85 <= ny - 1; j_imopVarPre85++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[56]*/
/*[56]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = -frct[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87];
}
}
}
}
/*[56]*/
L1 = 0;
/*[56]*/
L2 = nx - 1;
/*[56]*/
#pragma omp for nowait
/*[56]*/
/*[56]*/
/*[56]*/
for (i_imopVarPre84 = L1; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
/*[56]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[56]*/
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[56]*/
u21 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[56]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u21 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u21;
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u21;
/*[56]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u21;
}
}
}
/*[56]*/
// #pragma omp dummyFlush BARRIER_START
/*[56]*/
#pragma omp barrier
/*[57]*/
#pragma omp for nowait
/*[57]*/
/*[57]*/
/*[57]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tx2 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[57]*/
L2 = nx - 1;
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= L2; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[57]*/
u21i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[57]*/
u31i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[57]*/
u41i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[57]*/
u51i = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[57]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0];
/*[57]*/
u21im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1];
/*[57]*/
u31im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2];
/*[57]*/
u41im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3];
/*[57]*/
u51im1 = tmp_imopVarPre88 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4];
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tx3 * (u31i - u31im1);
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = tx3 * (u41i - u41im1);
/*[57]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dx1 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][0]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dx2 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][1]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dx3 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][2]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dx4 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][3]);
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dx5 * tx1 * (u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][4]);
}
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[57]*/
rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
/*[57]*/
ist1 = 3;
/*[57]*/
iend1 = nx - 4;
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (i_imopVarPre84 = ist1; i_imopVarPre84 <= iend1; i_imopVarPre84++) {
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84 - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 - 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84 + 1][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84 + 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[57]*/
/*[57]*/
/*[57]*/
/*[57]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[57]*/
/*[57]*/
rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 5][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
/*[57]*/
rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[nx - 4][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[nx - 3][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[nx - 2][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[57]*/
// #pragma omp dummyFlush BARRIER_START
/*[57]*/
#pragma omp barrier
/*[58]*/
L1 = 0;
/*[58]*/
L2 = ny - 1;
/*[58]*/
#pragma omp for nowait
/*[58]*/
/*[58]*/
/*[58]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
for (j_imopVarPre85 = L1; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
/*[58]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[58]*/
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[58]*/
u31 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[58]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u31;
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u31 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u31;
/*[58]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u31;
}
}
}
/*[58]*/
// #pragma omp dummyFlush BARRIER_START
/*[58]*/
#pragma omp barrier
/*[59]*/
#pragma omp for nowait
/*[59]*/
/*[59]*/
/*[59]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - ty2 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[59]*/
L2 = ny - 1;
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= L2; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[59]*/
u21j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[59]*/
u31j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[59]*/
u41j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[59]*/
u51j = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[59]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0];
/*[59]*/
u21jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1];
/*[59]*/
u31jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2];
/*[59]*/
u41jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3];
/*[59]*/
u51jm1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4];
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = ty3 * (u21j - u21jm1);
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = ty3 * (u41j - u41jm1);
/*[59]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dy1 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][0]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dy2 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][1]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dy3 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][2]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dy4 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][3]);
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dy5 * ty1 * (u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][4]);
}
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87]);
/*[59]*/
rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][3][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][4][k_imopVarPre86][m_imopVarPre87]);
}
/*[59]*/
jst1 = 3;
/*[59]*/
jend1 = ny - 4;
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (j_imopVarPre85 = jst1; j_imopVarPre85 <= jend1; j_imopVarPre85++) {
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85 - 2][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 - 1][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85 + 1][k_imopVarPre86][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85 + 2][k_imopVarPre86][m_imopVarPre87]);
}
}
/*[59]*/
/*[59]*/
/*[59]*/
/*[59]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[59]*/
/*[59]*/
rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 5][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
/*[59]*/
rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][ny - 4][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][ny - 3][k_imopVarPre86][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][ny - 2][k_imopVarPre86][m_imopVarPre87]);
}
}
}
/*[59]*/
// #pragma omp dummyFlush BARRIER_START
/*[59]*/
#pragma omp barrier
/*[60]*/
#pragma omp for nowait
/*[60]*/
/*[60]*/
/*[60]*/
for (i_imopVarPre84 = ist; i_imopVarPre84 <= iend; i_imopVarPre84++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (j_imopVarPre85 = jst; j_imopVarPre85 <= jend; j_imopVarPre85++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 0; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[60]*/
u41 = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[60]*/
q = 0.50 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] * u41;
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] * u41;
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] * u41 + 0.40e+00 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - q);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = (1.40e+00 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] - 0.40e+00 * q) * u41;
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - tz2 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87]);
}
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 1; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0];
/*[60]*/
u21k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1];
/*[60]*/
u31k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2];
/*[60]*/
u41k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3];
/*[60]*/
u51k = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4];
/*[60]*/
tmp_imopVarPre88 = 1.0 / u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0];
/*[60]*/
u21km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1];
/*[60]*/
u31km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2];
/*[60]*/
u41km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3];
/*[60]*/
u51km1 = tmp_imopVarPre88 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4];
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = tz3 * (u21k - u21km1);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = tz3 * (u31k - u31km1);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[60]*/
flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 1; k_imopVarPre86 <= nz - 2; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + dz1 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][0] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][0] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][0]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1]) + dz2 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][1] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][1] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][1]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2]) + dz3 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][2] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][2] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][2]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3]) + dz4 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][3] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][3] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][3]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4] - flux[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4]) + dz5 * tz1 * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][4] - 2.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][4] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][4]);
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - dssp * (+5.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - dssp * (-4.0 * u[i_imopVarPre84][j_imopVarPre85][1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][3][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][4][m_imopVarPre87]);
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (k_imopVarPre86 = 3; k_imopVarPre86 <= nz - 4; k_imopVarPre86++) {
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 2][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 - 1][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 1][m_imopVarPre87] + u[i_imopVarPre84][j_imopVarPre85][k_imopVarPre86 + 2][m_imopVarPre87]);
}
}
/*[60]*/
/*[60]*/
/*[60]*/
/*[60]*/
for (m_imopVarPre87 = 0; m_imopVarPre87 < 5; m_imopVarPre87++) {
/*[60]*/
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 5][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] + 6.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
/*[60]*/
rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] = rsd[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87] - dssp * (u[i_imopVarPre84][j_imopVarPre85][nz - 4][m_imopVarPre87] - 4.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 3][m_imopVarPre87] + 5.0 * u[i_imopVarPre84][j_imopVarPre85][nz - 2][m_imopVarPre87]);
}
}
}
}
/*[61]*/
#pragma omp parallel
{
/*[61]*/
/*[61]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[61]*/
double *sum;
/*[61]*/
v = rsd;
/*[61]*/
sum = rsdnm;
/*[61]*/
int i_imopVarPre75;
/*[61]*/
int j_imopVarPre76;
/*[61]*/
int k_imopVarPre77;
/*[61]*/
int m_imopVarPre78;
/*[61]*/
double sum0 = 0.0;
/*[61]*/
double sum1 = 0.0;
/*[61]*/
double sum2 = 0.0;
/*[61]*/
double sum3 = 0.0;
/*[61]*/
double sum4 = 0.0;
/*[61]*/
#pragma omp single nowait
{
/*[61]*/
/*[61]*/
/*[61]*/
/*[61]*/
/*[61]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[61]*/
/*[61]*/
sum[m_imopVarPre78] = 0.0;
}
}
/*[61]*/
// #pragma omp dummyFlush BARRIER_START
/*[61]*/
#pragma omp barrier
/*[62]*/
#pragma omp for nowait
/*[62]*/
/*[62]*/
/*[62]*/
for (i_imopVarPre75 = ist; i_imopVarPre75 <= iend; i_imopVarPre75++) {
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
for (j_imopVarPre76 = jst; j_imopVarPre76 <= jend; j_imopVarPre76++) {
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
/*[62]*/
for (k_imopVarPre77 = 1; k_imopVarPre77 <= nz0 - 2; k_imopVarPre77++) {
/*[62]*/
/*[62]*/
sum0 = sum0 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][0];
/*[62]*/
sum1 = sum1 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][1];
/*[62]*/
sum2 = sum2 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][2];
/*[62]*/
sum3 = sum3 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][3];
/*[62]*/
sum4 = sum4 + v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4] * v[i_imopVarPre75][j_imopVarPre76][k_imopVarPre77][4];
}
}
}
/*[62]*/
// #pragma omp dummyFlush CRITICAL_START
/*[62]*/
#pragma omp critical
{
/*[62]*/
/*[62]*/
sum[0] += sum0;
/*[62]*/
sum[1] += sum1;
/*[62]*/
sum[2] += sum2;
/*[62]*/
sum[3] += sum3;
/*[62]*/
sum[4] += sum4;
}
/*[62]*/
// #pragma omp dummyFlush CRITICAL_END
/*[62]*/
// #pragma omp dummyFlush BARRIER_START
/*[62]*/
#pragma omp barrier
/*[63]*/
/* One thread finalizes the norm: each sum[m] becomes the RMS value
   sqrt(sum of squares / number of interior grid points).  The divisor
   (nx0-2)*(ny0-2)*(nz0-2) counts interior points only. */
#pragma omp single nowait
{
/*[63]*/
/*[63]*/
/*[63]*/
/*[63]*/
/*[63]*/
for (m_imopVarPre78 = 0; m_imopVarPre78 < 5; m_imopVarPre78++) {
/*[63]*/
/*[63]*/
double _imopVarPre154;
/*[63]*/
double _imopVarPre155;
/*[63]*/
_imopVarPre154 = sum[m_imopVarPre78] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[63]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[63]*/
/*[63]*/
sum[m_imopVarPre78] = _imopVarPre155;
}
}
}
/*[]*/
/* Reset and start timer 1 to time the main time-stepping loop below. */
timer_clear(1);
/*[]*/
/*[]*/
timer_start(1);
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
for (istep = 1; istep <= itmax; istep++) {
/*[]*/
/*[]*/
int _imopVarPre372;
/*[]*/
int _imopVarPre370;
/*[]*/
int _imopVarPre371;
/*[]*/
/* Flattened short-circuit evaluation of:
     (istep % 20 == 0) || (istep == itmax) || (istep == 1)
   i.e. print a progress header every 20 steps and on the first/last step. */
_imopVarPre370 = istep % 20 == 0;
/*[]*/
/*[]*/
if (!_imopVarPre370) {
/*[]*/
/*[]*/
_imopVarPre371 = istep == itmax;
/*[]*/
/*[]*/
if (!_imopVarPre371) {
/*[]*/
/*[]*/
_imopVarPre371 = istep == 1;
}
/*[]*/
_imopVarPre370 = _imopVarPre371;
}
/*[]*/
/*[]*/
if (_imopVarPre370) {
/*[]*/
/*[]*/
/* Only the master thread emits the progress line. */
#pragma omp master
{
/*[]*/
/*[]*/
printf(" Time step %4d\n", istep);
/*[]*/
}
}
/*[64]*/
/* NOTE(review): `istep` is listed private here, but it is read inside this
   region (e.g. `istep % inorm` further below) — a private copy would be
   uninitialized at that read.  This looks like it should be firstprivate
   (or shared, as in the reference NPB LU code); confirm against the
   original benchmark source. */
#pragma omp parallel private(istep, i, j, k, m)
{
/*[64]*/
/*[64]*/
int _imopVarPre377;
/*[64]*/
int _imopVarPre378;
/*[64]*/
int _imopVarPre379;
/*[64]*/
int _imopVarPre380;
/*[64]*/
/* Scale the right-hand side by the time step: rsd := dt * rsd,
   for all 5 components over the interior of the grid. */
#pragma omp for nowait
/*[64]*/
/*[64]*/
/*[64]*/
for (i = ist; i <= iend; i++) {
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
for (j = jst; j <= jend; j++) {
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
for (k = 1; k <= nz - 2; k++) {
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
/*[64]*/
for (m = 0; m < 5; m++) {
/*[64]*/
/*[64]*/
rsd[i][j][k][m] = dt * rsd[i][j][k][m];
}
}
}
}
/*[64]*/
// #pragma omp dummyFlush BARRIER_START
/*[64]*/
#pragma omp barrier
/*[41]*/
/*[41]*/
/* Forward sweep over k-planes: jacld presumably forms the lower-block
   Jacobians (a, b, c, d) for plane k, and blts applies the block
   lower-triangular solve to rsd — TODO confirm against jacld/blts
   definitions elsewhere in this file. */
/*[41]*/
/*[41]*/
for (k = 1; k <= nz - 2; k++) {
/*[41]*/
/*[41]*/
jacld(k);
/*[41]*/
/*[41]*/
blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0);
/*[41]*/
}
/*[41]*/
// #pragma omp dummyFlush BARRIER_START
/*[41]*/
#pragma omp barrier
/*[42]*/
/*[42]*/
/* Backward sweep over k-planes (k descending): jacu presumably forms the
   upper-block Jacobians, and buts applies the block upper-triangular
   solve to rsd — TODO confirm against jacu/buts definitions elsewhere
   in this file. */
/*[42]*/
/*[42]*/
for (k = nz - 2; k >= 1; k--) {
/*[42]*/
/*[42]*/
jacu(k);
/*[42]*/
/*[42]*/
buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0);
/*[42]*/
}
/*[42]*/
// #pragma omp dummyFlush BARRIER_START
/*[42]*/
#pragma omp barrier
/*[65]*/
/* Update the solution with the computed correction: u := u + tmp * rsd.
   (tmp is presumably the SSOR relaxation factor set up before this
   region — TODO confirm where tmp is assigned.) */
#pragma omp for nowait
/*[65]*/
/*[65]*/
/*[65]*/
for (i = ist; i <= iend; i++) {
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (j = jst; j <= jend; j++) {
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (k = 1; k <= nz - 2; k++) {
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (m = 0; m < 5; m++) {
/*[65]*/
/*[65]*/
u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m];
}
}
}
}
/*[65]*/
/*[65]*/
if (istep % inorm == 0) {
/*[65]*/
/*[65]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[65]*/
double *sum;
/*[65]*/
v = rsd;
/*[65]*/
sum = delunm;
/*[65]*/
int i_imopVarPre89;
/*[65]*/
int j_imopVarPre90;
/*[65]*/
int k_imopVarPre91;
/*[65]*/
int m_imopVarPre92;
/*[65]*/
double sum0 = 0.0;
/*[65]*/
double sum1 = 0.0;
/*[65]*/
double sum2 = 0.0;
/*[65]*/
double sum3 = 0.0;
/*[65]*/
double sum4 = 0.0;
/*[65]*/
#pragma omp single nowait
{
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
/*[65]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[65]*/
/*[65]*/
sum[m_imopVarPre92] = 0.0;
}
}
/*[65]*/
// #pragma omp dummyFlush BARRIER_START
/*[65]*/
#pragma omp barrier
/*[66]*/
#pragma omp for nowait
/*[66]*/
/*[66]*/
/*[66]*/
for (i_imopVarPre89 = ist; i_imopVarPre89 <= iend; i_imopVarPre89++) {
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
for (j_imopVarPre90 = jst; j_imopVarPre90 <= jend; j_imopVarPre90++) {
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
/*[66]*/
for (k_imopVarPre91 = 1; k_imopVarPre91 <= nz0 - 2; k_imopVarPre91++) {
/*[66]*/
/*[66]*/
sum0 = sum0 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][0];
/*[66]*/
sum1 = sum1 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][1];
/*[66]*/
sum2 = sum2 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][2];
/*[66]*/
sum3 = sum3 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][3];
/*[66]*/
sum4 = sum4 + v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4] * v[i_imopVarPre89][j_imopVarPre90][k_imopVarPre91][4];
}
}
}
/*[66]*/
// #pragma omp dummyFlush CRITICAL_START
/*[66]*/
#pragma omp critical
{
/*[66]*/
/*[66]*/
sum[0] += sum0;
/*[66]*/
sum[1] += sum1;
/*[66]*/
sum[2] += sum2;
/*[66]*/
sum[3] += sum3;
/*[66]*/
sum[4] += sum4;
}
/*[66]*/
// #pragma omp dummyFlush CRITICAL_END
/*[66]*/
// #pragma omp dummyFlush BARRIER_START
/*[66]*/
#pragma omp barrier
/*[67]*/
#pragma omp single nowait
{
/*[67]*/
/*[67]*/
/*[67]*/
/*[67]*/
/*[67]*/
for (m_imopVarPre92 = 0; m_imopVarPre92 < 5; m_imopVarPre92++) {
/*[67]*/
/*[67]*/
double _imopVarPre154;
/*[67]*/
double _imopVarPre155;
/*[67]*/
_imopVarPre154 = sum[m_imopVarPre92] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[67]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[67]*/
/*[67]*/
sum[m_imopVarPre92] = _imopVarPre155;
}
}
/*[67]*/
// #pragma omp dummyFlush BARRIER_START
/*[67]*/
#pragma omp barrier
/*[68]*/
// #pragma omp dummyFlush BARRIER_START
/*[68]*/
#pragma omp barrier
}
/*[65, 69]*/
// #pragma omp dummyFlush BARRIER_START
/*[65, 69]*/
#pragma omp barrier
/*[66, 70]*/
/* Locals for the inlined right-hand-side computation that follows:
   loop indices, range bounds (L1/L2, ist1/iend1, jst1/jend1), and
   velocity/energy quantities (u21*, u31*, u41*, u51*) at a point and
   its -1 neighbor in each direction, used to build viscous fluxes. */
int i_imopVarPre79;
/*[66, 70]*/
int j_imopVarPre80;
/*[66, 70]*/
int k_imopVarPre81;
/*[66, 70]*/
int m_imopVarPre82;
/*[66, 70]*/
int L1;
/*[66, 70]*/
int L2;
/*[66, 70]*/
int ist1;
/*[66, 70]*/
int iend1;
/*[66, 70]*/
int jst1;
/*[66, 70]*/
int jend1;
/*[66, 70]*/
double q;
/*[66, 70]*/
double u21;
/*[66, 70]*/
double u31;
/*[66, 70]*/
double u41;
/*[66, 70]*/
double tmp_imopVarPre83;
/*[66, 70]*/
double u21i;
/*[66, 70]*/
double u31i;
/*[66, 70]*/
double u41i;
/*[66, 70]*/
double u51i;
/*[66, 70]*/
double u21j;
/*[66, 70]*/
double u31j;
/*[66, 70]*/
double u41j;
/*[66, 70]*/
double u51j;
/*[66, 70]*/
double u21k;
/*[66, 70]*/
double u31k;
/*[66, 70]*/
double u41k;
/*[66, 70]*/
double u51k;
/*[66, 70]*/
double u21im1;
/*[66, 70]*/
double u31im1;
/*[66, 70]*/
double u41im1;
/*[66, 70]*/
double u51im1;
/*[66, 70]*/
double u21jm1;
/*[66, 70]*/
double u31jm1;
/*[66, 70]*/
double u41jm1;
/*[66, 70]*/
double u51jm1;
/*[66, 70]*/
double u21km1;
/*[66, 70]*/
double u31km1;
/*[66, 70]*/
double u41km1;
/*[66, 70]*/
double u51km1;
/*[66, 70]*/
/* Initialize the residual from the forcing term: rsd := -frct over the
   WHOLE grid (0..n-1 in each direction, including boundary points),
   before the directional flux contributions are added below. */
#pragma omp for nowait
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (i_imopVarPre79 = 0; i_imopVarPre79 <= nx - 1; i_imopVarPre79++) {
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (j_imopVarPre80 = 0; j_imopVarPre80 <= ny - 1; j_imopVarPre80++) {
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
/*[66, 70]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[66, 70]*/
/*[66, 70]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = -frct[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82];
}
}
}
}
/*[66, 70]*/
// #pragma omp dummyFlush BARRIER_START
/*[66, 70]*/
#pragma omp barrier
/*[67, 71]*/
L1 = 0;
/*[67, 71]*/
L2 = nx - 1;
/*[67, 71]*/
#pragma omp for nowait
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
for (i_imopVarPre79 = L1; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
/*[67, 71]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[67, 71]*/
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[67, 71]*/
u21 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[67, 71]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u21 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u21;
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u21;
/*[67, 71]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u21;
}
}
}
/*[67, 71]*/
// #pragma omp dummyFlush BARRIER_START
/*[67, 71]*/
#pragma omp barrier
/*[68, 72]*/
#pragma omp for nowait
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tx2 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[68, 72]*/
L2 = nx - 1;
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= L2; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[68, 72]*/
u21i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[68, 72]*/
u31i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[68, 72]*/
u41i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[68, 72]*/
u51i = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[68, 72]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0];
/*[68, 72]*/
u21im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1];
/*[68, 72]*/
u31im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2];
/*[68, 72]*/
u41im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3];
/*[68, 72]*/
u51im1 = tmp_imopVarPre83 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4];
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = (4.0 / 3.0) * tx3 * (u21i - u21im1);
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tx3 * (u31i - u31im1);
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = tx3 * (u41i - u41im1);
/*[68, 72]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (((u21i * u21i) + (u31i * u31i) + (u41i * u41i)) - ((u21im1 * u21im1) + (u31im1 * u31im1) + (u41im1 * u41im1))) + (1.0 / 6.0) * tx3 * ((u21i * u21i) - (u21im1 * u21im1)) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1);
}
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dx1 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][0]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dx2 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][1]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dx3 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][2]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dx4 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][3]);
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dx5 * tx1 * (u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][4]);
}
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[68, 72]*/
rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
/*[68, 72]*/
ist1 = 3;
/*[68, 72]*/
iend1 = nx - 4;
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (i_imopVarPre79 = ist1; i_imopVarPre79 <= iend1; i_imopVarPre79++) {
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79 - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 - 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79 + 1][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79 + 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
/*[68, 72]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[68, 72]*/
/*[68, 72]*/
rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 5][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
/*[68, 72]*/
rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[nx - 4][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[nx - 3][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[nx - 2][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[68, 72]*/
// #pragma omp dummyFlush BARRIER_START
/*[68, 72]*/
#pragma omp barrier
/*[69, 73]*/
L1 = 0;
/*[69, 73]*/
L2 = ny - 1;
/*[69, 73]*/
#pragma omp for nowait
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
for (j_imopVarPre80 = L1; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
/*[69, 73]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[69, 73]*/
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[69, 73]*/
u31 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[69, 73]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u31;
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u31 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u31;
/*[69, 73]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u31;
}
}
}
/*[69, 73]*/
// #pragma omp dummyFlush BARRIER_START
/*[69, 73]*/
#pragma omp barrier
/*[70, 74]*/
#pragma omp for nowait
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - ty2 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[70, 74]*/
L2 = ny - 1;
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= L2; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[70, 74]*/
u21j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[70, 74]*/
u31j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[70, 74]*/
u41j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[70, 74]*/
u51j = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[70, 74]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0];
/*[70, 74]*/
u21jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1];
/*[70, 74]*/
u31jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2];
/*[70, 74]*/
u41jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3];
/*[70, 74]*/
u51jm1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4];
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = ty3 * (u21j - u21jm1);
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = (4.0 / 3.0) * ty3 * (u31j - u31jm1);
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = ty3 * (u41j - u41jm1);
/*[70, 74]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (((u21j * u21j) + (u31j * u31j) + (u41j * u41j)) - ((u21jm1 * u21jm1) + (u31jm1 * u31jm1) + (u41jm1 * u41jm1))) + (1.0 / 6.0) * ty3 * ((u31j * u31j) - (u31jm1 * u31jm1)) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1);
}
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dy1 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][0]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dy2 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][1]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dy3 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][2]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dy4 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][3]);
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dy5 * ty1 * (u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][4]);
}
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82]);
/*[70, 74]*/
rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][3][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][4][k_imopVarPre81][m_imopVarPre82]);
}
/*[70, 74]*/
jst1 = 3;
/*[70, 74]*/
jend1 = ny - 4;
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (j_imopVarPre80 = jst1; j_imopVarPre80 <= jend1; j_imopVarPre80++) {
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80 - 2][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 - 1][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80 + 1][k_imopVarPre81][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80 + 2][k_imopVarPre81][m_imopVarPre82]);
}
}
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
/*[70, 74]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[70, 74]*/
/*[70, 74]*/
rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 5][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
/*[70, 74]*/
rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][ny - 4][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][ny - 3][k_imopVarPre81][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][ny - 2][k_imopVarPre81][m_imopVarPre82]);
}
}
}
/*[70, 74]*/
// #pragma omp dummyFlush BARRIER_START
/*[70, 74]*/
#pragma omp barrier
/*[71, 75]*/
#pragma omp for nowait
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (i_imopVarPre79 = ist; i_imopVarPre79 <= iend; i_imopVarPre79++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (j_imopVarPre80 = jst; j_imopVarPre80 <= jend; j_imopVarPre80++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 0; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[71, 75]*/
u41 = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[71, 75]*/
q = 0.50 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] * u41;
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] * u41;
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] * u41 + 0.40e+00 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - q);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = (1.40e+00 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] - 0.40e+00 * q) * u41;
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - tz2 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82]);
}
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 1; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0];
/*[71, 75]*/
u21k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1];
/*[71, 75]*/
u31k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2];
/*[71, 75]*/
u41k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3];
/*[71, 75]*/
u51k = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4];
/*[71, 75]*/
tmp_imopVarPre83 = 1.0 / u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0];
/*[71, 75]*/
u21km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1];
/*[71, 75]*/
u31km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2];
/*[71, 75]*/
u41km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3];
/*[71, 75]*/
u51km1 = tmp_imopVarPre83 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4];
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = tz3 * (u21k - u21km1);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = tz3 * (u31k - u31km1);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = (4.0 / 3.0) * tz3 * (u41k - u41km1);
/*[71, 75]*/
flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (((u21k * u21k) + (u31k * u31k) + (u41k * u41k)) - ((u21km1 * u21km1) + (u31km1 * u31km1) + (u41km1 * u41km1))) + (1.0 / 6.0) * tz3 * ((u41k * u41k) - (u41km1 * u41km1)) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1);
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 1; k_imopVarPre81 <= nz - 2; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + dz1 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][0] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][0] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][0]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1]) + dz2 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][1] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][1] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][1]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2]) + dz3 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][2] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][2] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][2]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3]) + dz4 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][3] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][3] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][3]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4] - flux[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4]) + dz5 * tz1 * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][4] - 2.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][4] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][4]);
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - dssp * (+5.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - dssp * (-4.0 * u[i_imopVarPre79][j_imopVarPre80][1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][3][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][4][m_imopVarPre82]);
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (k_imopVarPre81 = 3; k_imopVarPre81 <= nz - 4; k_imopVarPre81++) {
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 2][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 - 1][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 1][m_imopVarPre82] + u[i_imopVarPre79][j_imopVarPre80][k_imopVarPre81 + 2][m_imopVarPre82]);
}
}
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
/*[71, 75]*/
for (m_imopVarPre82 = 0; m_imopVarPre82 < 5; m_imopVarPre82++) {
/*[71, 75]*/
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 5][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] + 6.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
/*[71, 75]*/
rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] = rsd[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82] - dssp * (u[i_imopVarPre79][j_imopVarPre80][nz - 4][m_imopVarPre82] - 4.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 3][m_imopVarPre82] + 5.0 * u[i_imopVarPre79][j_imopVarPre80][nz - 2][m_imopVarPre82]);
}
}
}
/*[71, 75]*/
// #pragma omp dummyFlush BARRIER_START
/*[71, 75]*/
#pragma omp barrier
/*[72, 76]*/
// #pragma omp dummyFlush BARRIER_START
/*[72, 76]*/
#pragma omp barrier
/*[73, 77]*/
#pragma omp master
{
/*[73, 77]*/
/*[73, 77]*/
_imopVarPre372 = (istep % inorm == 0);
/*[73, 77]*/
/*[73, 77]*/
if (!_imopVarPre372) {
/*[73, 77]*/
/*[73, 77]*/
_imopVarPre372 = (istep == itmax);
}
}
/*[73, 77]*/
// #pragma omp dummyFlush BARRIER_START
/*[73, 77]*/
#pragma omp barrier
/*[74]*/
/*[74]*/
if (_imopVarPre372) {
/*[74]*/
/*[74]*/
double ( *v )[12 / 2 * 2 + 1][12 / 2 * 2 + 1][5];
/*[74]*/
double *sum;
/*[74]*/
v = rsd;
/*[74]*/
sum = rsdnm;
/*[74]*/
int i_imopVarPre93;
/*[74]*/
int j_imopVarPre94;
/*[74]*/
int k_imopVarPre95;
/*[74]*/
int m_imopVarPre96;
/*[74]*/
double sum0 = 0.0;
/*[74]*/
double sum1 = 0.0;
/*[74]*/
double sum2 = 0.0;
/*[74]*/
double sum3 = 0.0;
/*[74]*/
double sum4 = 0.0;
/*[74]*/
#pragma omp single nowait
{
/*[74]*/
/*[74]*/
/*[74]*/
/*[74]*/
/*[74]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[74]*/
/*[74]*/
sum[m_imopVarPre96] = 0.0;
}
}
/*[74]*/
// #pragma omp dummyFlush BARRIER_START
/*[74]*/
#pragma omp barrier
/*[75]*/
#pragma omp for nowait
/*[75]*/
/*[75]*/
/*[75]*/
for (i_imopVarPre93 = ist; i_imopVarPre93 <= iend; i_imopVarPre93++) {
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
for (j_imopVarPre94 = jst; j_imopVarPre94 <= jend; j_imopVarPre94++) {
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
/*[75]*/
for (k_imopVarPre95 = 1; k_imopVarPre95 <= nz0 - 2; k_imopVarPre95++) {
/*[75]*/
/*[75]*/
sum0 = sum0 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][0];
/*[75]*/
sum1 = sum1 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][1];
/*[75]*/
sum2 = sum2 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][2];
/*[75]*/
sum3 = sum3 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][3];
/*[75]*/
sum4 = sum4 + v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4] * v[i_imopVarPre93][j_imopVarPre94][k_imopVarPre95][4];
}
}
}
/*[75]*/
// #pragma omp dummyFlush CRITICAL_START
/*[75]*/
#pragma omp critical
{
/*[75]*/
/*[75]*/
sum[0] += sum0;
/*[75]*/
sum[1] += sum1;
/*[75]*/
sum[2] += sum2;
/*[75]*/
sum[3] += sum3;
/*[75]*/
sum[4] += sum4;
}
/*[75]*/
// #pragma omp dummyFlush CRITICAL_END
/*[75]*/
// #pragma omp dummyFlush BARRIER_START
/*[75]*/
#pragma omp barrier
/*[76]*/
#pragma omp single nowait
{
/*[76]*/
/*[76]*/
/*[76]*/
/*[76]*/
/*[76]*/
for (m_imopVarPre96 = 0; m_imopVarPre96 < 5; m_imopVarPre96++) {
/*[76]*/
/*[76]*/
double _imopVarPre154;
/*[76]*/
double _imopVarPre155;
/*[76]*/
_imopVarPre154 = sum[m_imopVarPre96] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2));
/*[76]*/
_imopVarPre155 = sqrt(_imopVarPre154);
/*[76]*/
/*[76]*/
sum[m_imopVarPre96] = _imopVarPre155;
}
}
}
/*[74, 76]*/
// #pragma omp dummyFlush BARRIER_START
/*[74, 76]*/
#pragma omp barrier
/*[75, 77]*/
#pragma omp master
{
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre377 = (rsdnm[0] < tolrsd[0]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre377) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre378 = (rsdnm[1] < tolrsd[1]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre378) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre379 = (rsdnm[2] < tolrsd[2]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre379) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre380 = (rsdnm[3] < tolrsd[3]);
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre380) {
/*[75, 77]*/
/*[75, 77]*/
_imopVarPre380 = (rsdnm[4] < tolrsd[4]);
}
/*[75, 77]*/
_imopVarPre379 = _imopVarPre380;
}
/*[75, 77]*/
_imopVarPre378 = _imopVarPre379;
}
/*[75, 77]*/
_imopVarPre377 = _imopVarPre378;
}
/*[75, 77]*/
/*[75, 77]*/
if (_imopVarPre377) {
/*[75, 77]*/
/*[75, 77]*/
exit(1);
/*[75, 77]*/
}
}
}
}
/*[]*/
timer_stop(1);
/*[]*/
/*[]*/
maxtime = timer_read(1);
/*[]*/
}
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*[]*/
/*
 * verify - validate the computed norms of an LU run against the
 * per-class reference values.
 *
 * xcr:      RMS norms of the residual (5 components).
 * xce:      RMS norms of the solution error (5 components).
 * xci:      surface-integral value.
 * class:    out: problem class ('S', 'W', 'A', 'B' or 'C'), or 'U'
 *           when the grid size / iteration count matches no known class.
 * verified: out: 1 when every norm is within epsilon (relative) of its
 *           reference value, 0 otherwise.
 *
 * Reads the globals nx0, ny0, nz0, itmax and dt set during setup.
 * Behavior and output format are identical to the original expanded
 * code; the five copy-pasted class blocks are folded into one table.
 */
static void verify(double xcr[5], double xce[5] , double xci , char *class , boolean *verified) {
    /* Reference data per problem class: a class matches when the grid is
       cubic with edge n and the run used exactly itmax iterations. */
    static const struct {
        char id;
        int n;
        int itmax;
        double dt;
        double xcr[5];
        double xce[5];
        double xci;
    } refs[5] = {
        { 'S', 12, 50, 5.0e-1,
          { 1.6196343210976702e-02, 2.1976745164821318e-03, 1.5179927653399185e-03,
            1.5029584435994323e-03, 3.4264073155896461e-02 },
          { 6.4223319957960924e-04, 8.4144342047347926e-05, 5.8588269616485186e-05,
            5.8474222595157350e-05, 1.3103347914111294e-03 },
          7.8418928865937083 },
        { 'W', 33, 300, 1.5e-3,
          { 0.1236511638192e+02, 0.1317228477799e+01, 0.2550120713095e+01,
            0.2326187750252e+01, 0.2826799444189e+02 },
          { 0.4867877144216, 0.5064652880982e-01, 0.9281818101960e-01,
            0.8570126542733e-01, 0.1084277417792e+01 },
          0.1161399311023e+02 },
        { 'A', 64, 250, 2.0e+0,
          { 7.7902107606689367e+02, 6.3402765259692870e+01, 1.9499249727292479e+02,
            1.7845301160418537e+02, 1.8384760349464247e+03 },
          { 2.9964085685471943e+01, 2.8194576365003349, 7.3473412698774742,
            6.7139225687777051, 7.0715315688392578e+01 },
          2.6030925604886277e+01 },
        { 'B', 102, 250, 2.0e+0,
          { 3.5532672969982736e+03, 2.6214750795310692e+02, 8.8333721850952190e+02,
            7.7812774739425265e+02, 7.3087969592545314e+03 },
          { 1.1401176380212709e+02, 8.1098963655421574, 2.8480597317698308e+01,
            2.5905394567832939e+01, 2.6054907504857413e+02 },
          4.7887162703308227e+01 },
        { 'C', 162, 250, 2.0e+0,
          { 1.03766980323537846e+04, 8.92212458801008552e+02, 2.56238814582660871e+03,
            2.19194343857831427e+03, 1.78078057261061185e+04 },
          { 2.15986399716949279e+02, 1.55789559239863600e+01, 5.41318863077207766e+01,
            4.82262643154045421e+01, 4.55902910043250358e+02 },
          6.66404553572181300e+01 }
    };
    double xcrref[5], xceref[5], xciref;
    double xcrdif[5], xcedif[5], xcidif;
    double epsilon, dtref;
    int m, c;

    /* Relative tolerance used for every comparison below. */
    epsilon = 1.0e-08;
    *class = 'U';
    *verified = 1;
    dtref = 0.0;
    for (m = 0; m < 5; m++) {
        xcrref[m] = 1.0;
        xceref[m] = 1.0;
    }
    xciref = 1.0;

    /* Pick the reference set matching this run's configuration. */
    for (c = 0; c < 5; c++) {
        if (nx0 == refs[c].n && ny0 == refs[c].n && nz0 == refs[c].n && itmax == refs[c].itmax) {
            *class = refs[c].id;
            dtref = refs[c].dt;
            for (m = 0; m < 5; m++) {
                xcrref[m] = refs[c].xcr[m];
                xceref[m] = refs[c].xce[m];
            }
            xciref = refs[c].xci;
            break;
        }
    }
    if (*class == 'U') {
        /* Unknown configuration: no reference values, cannot verify. */
        *verified = 0;
    }

    /* Relative differences of each computed norm against its reference. */
    for (m = 0; m < 5; m++) {
        xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
        xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
    }
    xcidif = fabs((xci - xciref) / xciref);

    if (*class != 'U') {
        printf("\n Verification being performed for class %1c\n", *class);
        printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
        /* The time step must match the class reference exactly (within epsilon);
           a mismatch invalidates the whole verification. */
        if (fabs(dt - dtref) > epsilon) {
            *verified = 0;
            *class = 'U';
            printf(" DT does not match the reference value of %15.8e\n", dtref);
        }
    } else {
        printf(" Unknown class\n");
    }

    /* Residual norms. */
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of residual\n");
    } else {
        printf(" RMS-norms of residual\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            printf(" %2d %20.13e\n", m, xcr[m]);
        } else if (xcrdif[m] > epsilon) {
            *verified = 0;
            printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]);
        } else {
            printf(" %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]);
        }
    }

    /* Solution-error norms. */
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of solution error\n");
    } else {
        printf(" RMS-norms of solution error\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            printf(" %2d %20.13e\n", m, xce[m]);
        } else if (xcedif[m] > epsilon) {
            *verified = 0;
            printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]);
        } else {
            printf(" %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]);
        }
    }

    /* Surface integral. */
    if (*class != 'U') {
        printf(" Comparison of surface integral\n");
    } else {
        printf(" Surface integral\n");
    }
    if (*class == 'U') {
        printf(" %20.13e\n", xci);
    } else if (xcidif > epsilon) {
        *verified = 0;
        printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
    } else {
        printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
    }

    /* Final verdict. */
    if (*class == 'U') {
        printf(" No reference values provided\n");
        printf(" No verification performed\n");
    } else if (*verified) {
        printf(" Verification Successful\n");
    } else {
        printf(" Verification failed\n");
    }
}
|
m_image.h | /*======================================================================
Maratis Tiny C Library
version 1.0
------------------------------------------------------------------------
Copyright (c) 2015 Anael Seghezzi <www.maratis3d.org>
Copyright (c) 2015 Marti Maria Saguer
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would
be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not
be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
========================================================================*/
/*
Image manipulation :
- transformation (re-frame, mirror, rotation)
- conversions (float, half, ubyte, linear, greyscale...)
- filtering (convolution, Gaussian blur, Harris)
- scaling (pyramid, generic, bilinear)
- morphology (flood-fill, dilate, erode, thinning)
- edge and corner detection (Sobel, Harris)
to create the implementation,
#define M_IMAGE_IMPLEMENTATION
in *one* C/CPP file that includes this file.
optional:
include after *m_math.h*
//////////////////////////////////////////////////////
Example: create a 256x256 float image with 1 component:
struct m_image foo1 = M_IMAGE_IDENTITY();
struct m_image foo2 = M_IMAGE_IDENTITY();
int x, y;
m_image_create(&foo1, M_FLOAT, 256, 256, 1);
memset(foo1.data, 0, foo1.size * sizeof(float)); // clear to zero
y = 128; x = 128;
((float *)foo1.data)[y * foo1.width + x] = 1.0f; // set (x, y) pixel to one
m_image_gaussian_blur(&foo2, &foo1, 3, 3); // apply Gaussian blur
m_image_destroy(&foo2);
m_image_destroy(&foo1);
*/
#ifndef M_IMAGE_H
#define M_IMAGE_H
#include <stdint.h>
#define M_IMAGE_VERSION 1
#ifdef __cplusplus
extern "C" {
#endif
#ifndef MIAPI
#define MIAPI extern
#endif
#define M_VOID 0
#define M_BOOL 1
#define M_BYTE 2
#define M_UBYTE 3
#define M_SHORT 4
#define M_USHORT 5
#define M_INT 6
#define M_UINT 7
#define M_HALF 8
#define M_FLOAT 9
#define M_DOUBLE 10
/* Generic image container: a flat pixel buffer plus its dimensions and
   element type. Initialize with M_IMAGE_IDENTITY() before m_image_create. */
struct m_image
{
   void *data;  /* pixel buffer; managed via m_image_create / m_image_destroy */
   int size;    /* presumably width * height * comp elements — confirm against m_image_create */
   int width;   /* image width in pixels */
   int height;  /* image height in pixels */
   int comp;    /* components per pixel (e.g. 1 = grey, 3 = RGB, 4 = RGBA) */
   char type;   /* element type code: one of the M_* defines above (M_UBYTE, M_FLOAT, ...) */
};
/* identity, must be used before calling m_image_create */
#define M_IMAGE_IDENTITY() {0, 0, 0, 0, 0, 0}
/* m_image type util */
MIAPI int m_type_sizeof(char type);
/* fully supported types are: M_UBYTE, M_USHORT, M_HALF, M_FLOAT
partially supported types: M_BYTE, M_SHORT, M_INT, M_UINT (no support for conversion) */
MIAPI void m_image_create(struct m_image *image, char type, int width, int height, int comp);
MIAPI void m_image_destroy(struct m_image *image);
MIAPI void m_image_ubyte_to_float(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_ushort_to_float(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_half_to_float(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_float_to_ubyte(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_float_to_ushort(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_float_to_half(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_copy(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_copy_sub_image(struct m_image *dest, const struct m_image *src, int x, int y, int w, int h);
MIAPI void m_image_reframe_zero(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom);
MIAPI void m_image_reframe(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom);
MIAPI void m_image_extract_component(struct m_image *dest, const struct m_image *src, int c);
MIAPI void m_image_rotate_left(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_rotate_right(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_rotate_180(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_mirror_x(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_mirror_y(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_premultiply(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_unpremultiply(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_sRGB_to_linear(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_linear_to_sRGB(struct m_image *dest, const struct m_image *src);
/* float/half conversion */
MIAPI float m_half2float(uint16_t h);
MIAPI uint16_t m_float2half(float flt);
/* raw processing */
MIAPI void m_sRGB_to_linear(float *dest, const float *src, int size);
MIAPI void m_linear_to_sRGB(float *dest, const float *src, int size);
MIAPI void m_RGB_to_HSV(float *dest, const float *src);
MIAPI void m_HSV_to_RGB(float *dest, const float *src);
MIAPI void m_RGB_to_HSL(float *dest, const float *src);
MIAPI void m_HSL_to_RGB(float *dest, const float *src);
MIAPI void m_gaussian_kernel(float *dest, int size, float radius);
MIAPI void m_sst(float *dest, const float *src, int count);
MIAPI void m_harris_response(float *dest, const float *src, int count);
MIAPI void m_tfm(float *dest, const float *src, int count);
MIAPI void m_normalize(float *dest, const float *src, int size); /* dest = src / norm(src) */
MIAPI void m_normalize_sum(float *dest, const float *src, int size); /* dest = src / sum(src) */
MIAPI float m_mean(const float *src, int size);
MIAPI float m_squared_distance(const float *src1, const float *src2, int size);
MIAPI float m_squared_distance_dispatch(const float *src1, const float *src2, int size);
MIAPI float m_convolution(const float *src1, const float *src2, int size); /* a dot product really */
MIAPI float m_chi_squared_distance(const float *src1, const float *src2, int size); /* good at estimating signed histogram differences */
/* conversion to 1 component (float image only) */
MIAPI void m_image_grey(struct m_image *dest, const struct m_image *src); /* from RGB src */
MIAPI void m_image_max(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_max_abs(struct m_image *dest, const struct m_image *src);
/* summed area table (also called "integral image") */
MIAPI void m_image_summed_area(struct m_image *dest, const struct m_image *src);
/* convolutions (float image only) */
/* if alpha channel, src image must be pre-multiplied */
MIAPI void m_image_convolution_h_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size);
MIAPI void m_image_convolution_v_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size);
MIAPI void m_image_convolution_h(struct m_image *dest, const struct m_image *src, float *kernel, int size); /* horizontal */
MIAPI void m_image_convolution_v(struct m_image *dest, const struct m_image *src, float *kernel, int size); /* vertical */
MIAPI void m_image_gaussian_blur(struct m_image *dest, const struct m_image *src, float dx, float dy);
/* edge and corner (float 1 component image only) */
MIAPI void m_image_sobel(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_harris(struct m_image *dest, const struct m_image *src, float radius);
/* morphology (ubyte 1 component image only) */
MIAPI int m_image_floodfill_4x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size);
MIAPI int m_image_floodfill_8x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size);
MIAPI void m_image_dilate(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_erode(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_edge_4x(struct m_image *dest, const struct m_image *src, uint8_t ref);
MIAPI void m_image_thin(struct m_image *dest);
/* non maxima suppression (float image only) */
MIAPI void m_image_non_max_supp(struct m_image *dest, const struct m_image *src, int radius, float threshold);
/* detect Harris corners
margin: margin around the image to exclude corners
radius: maxima radius
threshold: Harris response threshold
corners: corners coordinates of size max_count * 2
max_count: maximum number of corners
return corner count */
MIAPI int m_image_corner_harris(const struct m_image *src, int margin, float radius, float threshold, int *corners, int max_count);
/* resizing (float image only) */
MIAPI void m_image_sub_pixel(const struct m_image *src, float x, float y, float *result);
MIAPI void m_image_pyrdown(struct m_image *dest, const struct m_image *src);
MIAPI void m_image_resize(struct m_image *dest, const struct m_image *src, int new_width, int new_height);
#ifdef __cplusplus
}
#endif
/*
----------------------------------------------------------------------*/
#endif /* M_IMAGE_H */
#ifdef M_IMAGE_IMPLEMENTATION
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#ifndef M_SAFE_FREE
#define M_SAFE_FREE(p) {if (p) {free(p); (p) = NULL;}}
#endif
#ifndef M_MIN
#define M_MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef M_MAX
#define M_MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef M_ABS
#define M_ABS(a) (((a) < 0) ? -(a) : (a))
#endif
#ifndef M_CLAMP
#define M_CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
#endif
MIAPI void m_linear_to_sRGB(float *dest, const float *src, int size)
{
int i;
for (i = 0; i < size; i++) {
if (*src < 0.0031308f)
*dest = 12.92f * (*src);
else
*dest = (1.0f + 0.055f) * powf(*src, 1.0f/2.4f) - 0.055f;
dest++;
src++;
}
}
MIAPI void m_sRGB_to_linear(float *dest, const float *src, int size)
{
int i;
for (i = 0; i < size; i++) {
if (*src <= 0.03928f)
*dest = *src / 12.92f;
else
*dest = powf((*src + 0.055f) / 1.055f, 2.4f);
dest++;
src++;
}
}
/* Convert an RGB triple (src[0..2]) to HSV in dest[0..2]:
   hue in degrees [0, 360), saturation in [0, 1], value = max channel.
   Achromatic inputs (greys and black) report h = s = 0. */
MIAPI void m_RGB_to_HSV(float *dest, const float *src)
{
   float r = src[0];
   float g = src[1];
   float b = src[2];
   float h = 0;
   float s = 0;
   float v = 0;
   float min = r;
   float max = r;
   float delta;
   min = M_MIN(min, g);
   min = M_MIN(min, b);
   max = M_MAX(max, g);
   max = M_MAX(max, b);
   delta = max - min;
   v = max; /* value is the brightest channel */
   /* achromatic: hue/saturation undefined — report 0; this also guards
      the divisions by delta and max below */
   if (delta == 0 || max == 0) {
      dest[0] = h; dest[1] = s; dest[2] = v;
      return;
   }
   s = delta / max;
   /* hue sector chosen by the dominant channel; exact float equality is
      intentional — max was copied from one of r/g/b above */
   if (r == max)
      h = (g - b) / delta;
   else if (g == max)
      h = 2 + (b - r) / delta;
   else
      h = 4 + (r - g) / delta;
   h *= 60; /* sector units (each 60 degrees wide) -> degrees */
   if (h < 0) h += 360; /* wrap negative red-sector hues into [0, 360) */
   dest[0] = h; dest[1] = s; dest[2] = v;
}
MIAPI void m_HSV_to_RGB(float *dest, const float *src)
{
float r, g, b;
float f, p, q, t;
float h = src[0];
float s = src[1];
float v = src[2];
int i;
if (s == 0) {
dest[0] = v; dest[1] = v; dest[2] = v;
return;
}
h /= 60.0f;
i = (int)floorf(h);
f = h - i;
p = v * (1 - s);
q = v * (1 - s * f);
t = v * (1 - s * (1 - f));
switch (i) {
case 0:
r = v; g = t; b = p;
break;
case 1:
r = q; g = v; b = p;
break;
case 2:
r = p; g = v; b = t;
break;
case 3:
r = p; g = q; b = v;
break;
case 4:
r = t; g = p; b = v;
break;
default:
r = v; g = p; b = q;
break;
}
dest[0] = r; dest[1] = g; dest[2] = b;
}
MIAPI void m_RGB_to_HSL(float *dest, const float *src)
{
float h, s, l, dr, dg, db;
float r = src[0];
float g = src[1];
float b = src[2];
float min = r;
float max = r;
float delta;
min = M_MIN(min, g);
min = M_MIN(min, b);
max = M_MAX(max, g);
max = M_MAX(max, b);
delta = max - min;
h = 0;
s = 0;
l = (max + min) * 0.5f;
if (max == 0) {
dest[0] = h; dest[1] = s; dest[2] = l;
return;
}
if(r == max)
h = fmodf(((g - b) / delta), 6.0f);
else if(g == max)
h = ((b - r) / delta) + 2.0f;
else
h = ((r - g) / delta) + 4.0f;
h *= 60.0f;
if (h < 0) h += 360;
s = delta / (1.0f - fabsf(2.0f * l - 1.0f));
dest[0] = h;
dest[1] = s;
dest[2] = l;
}
MIAPI void m_HSL_to_RGB(float *dest, const float *src)
{
float h = src[0];
float s = src[1];
float l = src[2];
float c, m, x;
if (s == 0) {
dest[0] = l; dest[1] = l; dest[2] = l;
return;
}
c = (1.0f - fabsf(2.0f * l - 1.0f)) * s;
m = 1.0f * (l - 0.5f * c);
x = c * (1.0f - fabsf(fmodf(h / 60.0f, 2) - 1.0f));
if (h >= 0.0f && h < 60.0f) {
dest[0] = c + m;
dest[1] = x + m;
dest[2] = m;
}
else if (h >= 60.0f && h < 120.0f) {
dest[0] = x + m;
dest[1] = c + m;
dest[2] = m;
}
else if (h < 120.0f && h < 180.0f) {
dest[0] = m;
dest[1] = c + m;
dest[2] = x + m;
}
else if (h >= 180.0f && h < 240.0f) {
dest[0] = m;
dest[1] = x + m;
dest[2] = c + m;
}
else if (h >= 240.0f && h < 300.0f) {
dest[0] = x + m;
dest[1] = m;
dest[2] = c + m;
}
else if (h >= 300.0f && h < 360.0f) {
dest[0] = c + m;
dest[1] = m;
dest[2] = x + m;
}
else {
dest[0] = m;
dest[1] = m;
dest[2] = m;
}
}
MIAPI void m_gaussian_kernel(float *dest, int size, float radius)
{
float *k;
float rs, s2, sum;
float sigma = 1.6f;
float tetha = 2.25f;
int r, hsize = size / 2;
s2 = 1.0f / expf(sigma * sigma * tetha);
rs = sigma / radius;
k = dest;
sum = 0.0f;
/* compute gaussian kernel */
for (r = -hsize; r <= hsize; r++) {
float x = r * rs;
float v = (1.0f / expf(x * x)) - s2;
v = M_MAX(v, 0);
*k = v;
sum += v;
k++;
}
/* normalize */
if (sum > 0.0f) {
float isum = 1.0f / sum;
for (r = 0; r < size; r++)
dest[r] *= isum;
}
}
/* Structure-tensor terms: for each 2-float gradient pair (dx, dy) in src,
   write the 3-float triple (dx*dx, dy*dy, dx*dy) to dest. */
MIAPI void m_sst(float *dest, const float *src, int count)
{
   int i;
   for (i = 0; i < count; i++) {
      float gx = src[i * 2];
      float gy = src[i * 2 + 1];
      dest[i * 3]     = gx * gx;
      dest[i * 3 + 1] = gy * gy;
      dest[i * 3 + 2] = gx * gy;
   }
}
/* Harris corner response per structure-tensor triple (dx2, dy2, dxy):
   det / (trace + eps), using the Noble variant with a small epsilon to
   avoid division by zero. src holds 3 floats per entry, dest 1. */
MIAPI void m_harris_response(float *dest, const float *src, int count)
{
   int i;
   for (i = 0; i < count; i++) {
      float a = src[i * 3];       /* dx^2 */
      float b = src[i * 3 + 1];   /* dy^2 */
      float c = src[i * 3 + 2];   /* dx*dy */
      dest[i] = (a * b - c * c) / (a + b + 1e-8f);
   }
}
/* Tangent flow map: for each structure-tensor triple (dx2, dy2, dxy) in src,
   write to dest the 2-D eigenvector of the tensor [[dx2, dxy], [dxy, dy2]]
   associated with its SMALLER eigenvalue, i.e. the direction of least
   gradient change (the edge tangent). `lambda` below is the larger
   eigenvalue, (trace + sqrt(discriminant)) / 2, with the discriminant
   (dx2 - dy2)^2 + 4*dxy^2 written in expanded form; (dx2 - lambda, dxy) is
   the corresponding minor-axis eigenvector. src advances by 3 floats per
   entry, dest by 2. */
MIAPI void m_tfm(float *dest, const float *src, int count)
{
   int i;
   for (i = 0; i < count; i++) {
      /* the branch swaps the roles of the two diagonal terms depending on
         which is smaller — presumably for numerical conditioning; the two
         arms produce the component pair in mirrored order */
      if (src[0] < src[1]) {
         float dx2 = src[0];
         float dy2 = src[1];
         float dxy = src[2];
         /* discriminant, clamped at 0 to absorb float round-off */
         float sqd = (dy2 * dy2) - (2.0f * dx2 * dy2) + (dx2 * dx2) + (4.0f * dxy * dxy);
         float lambda = 0.5f * (dy2 + dx2 + sqrtf(M_MAX(0, sqd)));
         dest[0] = dx2 - lambda;
         dest[1] = dxy;
      }
      else {
         float dy2 = src[0];
         float dx2 = src[1];
         float dxy = src[2];
         float sqd = (dy2 * dy2) - (2.0f * dx2 * dy2) + (dx2 * dx2) + (4.0f * dxy * dxy);
         float lambda = 0.5f * (dy2 + dx2 + sqrtf(M_MAX(0, sqd)));
         dest[0] = dxy;
         dest[1] = dx2 - lambda;
      }
      src += 3;
      dest += 2;
   }
}
/* Chi-squared distance between two histograms:
   0.5 * sum((b - a)^2 / (a + b)), skipping bins whose sum is not positive
   so empty bins contribute nothing (and never divide by zero). */
MIAPI float m_chi_squared_distance(const float *src1, const float *src2, int size)
{
   float acc = 0;
   int i;
   for (i = 0; i < size; i++) {
      float a = src1[i];
      float b = src2[i];
      float denom = a + b;
      if (denom > 0) {
         float d = b - a;
         acc += (d * d) / denom;
      }
   }
   return acc * 0.5f;
}
/* Dot product of two float vectors of length `size`
   (named "convolution" because callers pass a kernel window). */
MIAPI float m_convolution(const float *src1, const float *src2, int size)
{
   const float *a = src1;
   const float *b = src2;
   float acc = 0;
   int n;
   for (n = 0; n < size; n++)
      acc += (*a++) * (*b++);
   return acc;
}
MIAPI void m_normalize(float *dest, const float *src, int size)
{
float sum = 0.0f; int i;
for(i = 0; i < size; i++)
sum += src[i] * src[i];
if (sum > 0.0f) {
sum = 1.0f / sqrtf(sum);
for(i = 0; i < size; i++)
dest[i] = src[i] * sum;
}
else if (dest != src) {
memset(dest, 0, size * sizeof(float));
}
}
/* Scale src so its elements sum to 1: dest = src / sum(src).
   A non-positive sum clears dest. dest and src may alias. */
MIAPI void m_normalize_sum(float *dest, const float *src, int size)
{
   float total = 0.0f;
   int i;
   for (i = 0; i < size; i++)
      total += src[i];
   if (total > 0.0f) {
      float inv = 1.0f / total;
      for (i = 0; i < size; i++)
         dest[i] = src[i] * inv;
   }
   else {
      memset(dest, 0, size * sizeof(float));
   }
}
/* Arithmetic mean of `size` floats (size must be > 0). */
MIAPI float m_mean(const float *src, int size)
{
   float total = 0;
   int i;
   for (i = 0; i < size; i++)
      total += src[i];
   return total / size;
}
/* Squared Euclidean distance between two float vectors of length `size`. */
MIAPI float m_squared_distance(const float *src1, const float *src2, int size)
{
   float acc = 0.0f;
   int i;
   for (i = 0; i < size; i++) {
      float d = src2[i] - src1[i];
      acc += d * d;
   }
   return acc;
}
/* m_half2float / m_float2half :
a big thanks to Marti Maria Saguer for allowing the use of this code
under the zlib license from "Little Color Management System" (cmshalf.c) */
/* This code is inspired in the paper "Fast Half Float Conversions"
by Jeroen van der Zijp */
/* half->float mantissa lookup table ("Fast Half Float Conversions",
   Jeroen van der Zijp). Indexed in m_half2float() by the half's 10
   mantissa bits plus m__offset[exponent]; each entry is a float32
   bit-pattern contribution added to m__exponent[exponent]. */
static uint32_t m__mantissa[2048] = {
0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34a00000,
0x34c00000, 0x34e00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000,
0x35400000, 0x35500000, 0x35600000, 0x35700000, 0x35800000, 0x35880000,
0x35900000, 0x35980000, 0x35a00000, 0x35a80000, 0x35b00000, 0x35b80000,
0x35c00000, 0x35c80000, 0x35d00000, 0x35d80000, 0x35e00000, 0x35e80000,
0x35f00000, 0x35f80000, 0x36000000, 0x36040000, 0x36080000, 0x360c0000,
0x36100000, 0x36140000, 0x36180000, 0x361c0000, 0x36200000, 0x36240000,
0x36280000, 0x362c0000, 0x36300000, 0x36340000, 0x36380000, 0x363c0000,
0x36400000, 0x36440000, 0x36480000, 0x364c0000, 0x36500000, 0x36540000,
0x36580000, 0x365c0000, 0x36600000, 0x36640000, 0x36680000, 0x366c0000,
0x36700000, 0x36740000, 0x36780000, 0x367c0000, 0x36800000, 0x36820000,
0x36840000, 0x36860000, 0x36880000, 0x368a0000, 0x368c0000, 0x368e0000,
0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369a0000,
0x369c0000, 0x369e0000, 0x36a00000, 0x36a20000, 0x36a40000, 0x36a60000,
0x36a80000, 0x36aa0000, 0x36ac0000, 0x36ae0000, 0x36b00000, 0x36b20000,
0x36b40000, 0x36b60000, 0x36b80000, 0x36ba0000, 0x36bc0000, 0x36be0000,
0x36c00000, 0x36c20000, 0x36c40000, 0x36c60000, 0x36c80000, 0x36ca0000,
0x36cc0000, 0x36ce0000, 0x36d00000, 0x36d20000, 0x36d40000, 0x36d60000,
0x36d80000, 0x36da0000, 0x36dc0000, 0x36de0000, 0x36e00000, 0x36e20000,
0x36e40000, 0x36e60000, 0x36e80000, 0x36ea0000, 0x36ec0000, 0x36ee0000,
0x36f00000, 0x36f20000, 0x36f40000, 0x36f60000, 0x36f80000, 0x36fa0000,
0x36fc0000, 0x36fe0000, 0x37000000, 0x37010000, 0x37020000, 0x37030000,
0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000,
0x370a0000, 0x370b0000, 0x370c0000, 0x370d0000, 0x370e0000, 0x370f0000,
0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000,
0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371a0000, 0x371b0000,
0x371c0000, 0x371d0000, 0x371e0000, 0x371f0000, 0x37200000, 0x37210000,
0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000,
0x37280000, 0x37290000, 0x372a0000, 0x372b0000, 0x372c0000, 0x372d0000,
0x372e0000, 0x372f0000, 0x37300000, 0x37310000, 0x37320000, 0x37330000,
0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000,
0x373a0000, 0x373b0000, 0x373c0000, 0x373d0000, 0x373e0000, 0x373f0000,
0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000,
0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374a0000, 0x374b0000,
0x374c0000, 0x374d0000, 0x374e0000, 0x374f0000, 0x37500000, 0x37510000,
0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000,
0x37580000, 0x37590000, 0x375a0000, 0x375b0000, 0x375c0000, 0x375d0000,
0x375e0000, 0x375f0000, 0x37600000, 0x37610000, 0x37620000, 0x37630000,
0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000,
0x376a0000, 0x376b0000, 0x376c0000, 0x376d0000, 0x376e0000, 0x376f0000,
0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000,
0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377a0000, 0x377b0000,
0x377c0000, 0x377d0000, 0x377e0000, 0x377f0000, 0x37800000, 0x37808000,
0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000,
0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000,
0x37870000, 0x37878000, 0x37880000, 0x37888000, 0x37890000, 0x37898000,
0x378a0000, 0x378a8000, 0x378b0000, 0x378b8000, 0x378c0000, 0x378c8000,
0x378d0000, 0x378d8000, 0x378e0000, 0x378e8000, 0x378f0000, 0x378f8000,
0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000,
0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000,
0x37960000, 0x37968000, 0x37970000, 0x37978000, 0x37980000, 0x37988000,
0x37990000, 0x37998000, 0x379a0000, 0x379a8000, 0x379b0000, 0x379b8000,
0x379c0000, 0x379c8000, 0x379d0000, 0x379d8000, 0x379e0000, 0x379e8000,
0x379f0000, 0x379f8000, 0x37a00000, 0x37a08000, 0x37a10000, 0x37a18000,
0x37a20000, 0x37a28000, 0x37a30000, 0x37a38000, 0x37a40000, 0x37a48000,
0x37a50000, 0x37a58000, 0x37a60000, 0x37a68000, 0x37a70000, 0x37a78000,
0x37a80000, 0x37a88000, 0x37a90000, 0x37a98000, 0x37aa0000, 0x37aa8000,
0x37ab0000, 0x37ab8000, 0x37ac0000, 0x37ac8000, 0x37ad0000, 0x37ad8000,
0x37ae0000, 0x37ae8000, 0x37af0000, 0x37af8000, 0x37b00000, 0x37b08000,
0x37b10000, 0x37b18000, 0x37b20000, 0x37b28000, 0x37b30000, 0x37b38000,
0x37b40000, 0x37b48000, 0x37b50000, 0x37b58000, 0x37b60000, 0x37b68000,
0x37b70000, 0x37b78000, 0x37b80000, 0x37b88000, 0x37b90000, 0x37b98000,
0x37ba0000, 0x37ba8000, 0x37bb0000, 0x37bb8000, 0x37bc0000, 0x37bc8000,
0x37bd0000, 0x37bd8000, 0x37be0000, 0x37be8000, 0x37bf0000, 0x37bf8000,
0x37c00000, 0x37c08000, 0x37c10000, 0x37c18000, 0x37c20000, 0x37c28000,
0x37c30000, 0x37c38000, 0x37c40000, 0x37c48000, 0x37c50000, 0x37c58000,
0x37c60000, 0x37c68000, 0x37c70000, 0x37c78000, 0x37c80000, 0x37c88000,
0x37c90000, 0x37c98000, 0x37ca0000, 0x37ca8000, 0x37cb0000, 0x37cb8000,
0x37cc0000, 0x37cc8000, 0x37cd0000, 0x37cd8000, 0x37ce0000, 0x37ce8000,
0x37cf0000, 0x37cf8000, 0x37d00000, 0x37d08000, 0x37d10000, 0x37d18000,
0x37d20000, 0x37d28000, 0x37d30000, 0x37d38000, 0x37d40000, 0x37d48000,
0x37d50000, 0x37d58000, 0x37d60000, 0x37d68000, 0x37d70000, 0x37d78000,
0x37d80000, 0x37d88000, 0x37d90000, 0x37d98000, 0x37da0000, 0x37da8000,
0x37db0000, 0x37db8000, 0x37dc0000, 0x37dc8000, 0x37dd0000, 0x37dd8000,
0x37de0000, 0x37de8000, 0x37df0000, 0x37df8000, 0x37e00000, 0x37e08000,
0x37e10000, 0x37e18000, 0x37e20000, 0x37e28000, 0x37e30000, 0x37e38000,
0x37e40000, 0x37e48000, 0x37e50000, 0x37e58000, 0x37e60000, 0x37e68000,
0x37e70000, 0x37e78000, 0x37e80000, 0x37e88000, 0x37e90000, 0x37e98000,
0x37ea0000, 0x37ea8000, 0x37eb0000, 0x37eb8000, 0x37ec0000, 0x37ec8000,
0x37ed0000, 0x37ed8000, 0x37ee0000, 0x37ee8000, 0x37ef0000, 0x37ef8000,
0x37f00000, 0x37f08000, 0x37f10000, 0x37f18000, 0x37f20000, 0x37f28000,
0x37f30000, 0x37f38000, 0x37f40000, 0x37f48000, 0x37f50000, 0x37f58000,
0x37f60000, 0x37f68000, 0x37f70000, 0x37f78000, 0x37f80000, 0x37f88000,
0x37f90000, 0x37f98000, 0x37fa0000, 0x37fa8000, 0x37fb0000, 0x37fb8000,
0x37fc0000, 0x37fc8000, 0x37fd0000, 0x37fd8000, 0x37fe0000, 0x37fe8000,
0x37ff0000, 0x37ff8000, 0x38000000, 0x38004000, 0x38008000, 0x3800c000,
0x38010000, 0x38014000, 0x38018000, 0x3801c000, 0x38020000, 0x38024000,
0x38028000, 0x3802c000, 0x38030000, 0x38034000, 0x38038000, 0x3803c000,
0x38040000, 0x38044000, 0x38048000, 0x3804c000, 0x38050000, 0x38054000,
0x38058000, 0x3805c000, 0x38060000, 0x38064000, 0x38068000, 0x3806c000,
0x38070000, 0x38074000, 0x38078000, 0x3807c000, 0x38080000, 0x38084000,
0x38088000, 0x3808c000, 0x38090000, 0x38094000, 0x38098000, 0x3809c000,
0x380a0000, 0x380a4000, 0x380a8000, 0x380ac000, 0x380b0000, 0x380b4000,
0x380b8000, 0x380bc000, 0x380c0000, 0x380c4000, 0x380c8000, 0x380cc000,
0x380d0000, 0x380d4000, 0x380d8000, 0x380dc000, 0x380e0000, 0x380e4000,
0x380e8000, 0x380ec000, 0x380f0000, 0x380f4000, 0x380f8000, 0x380fc000,
0x38100000, 0x38104000, 0x38108000, 0x3810c000, 0x38110000, 0x38114000,
0x38118000, 0x3811c000, 0x38120000, 0x38124000, 0x38128000, 0x3812c000,
0x38130000, 0x38134000, 0x38138000, 0x3813c000, 0x38140000, 0x38144000,
0x38148000, 0x3814c000, 0x38150000, 0x38154000, 0x38158000, 0x3815c000,
0x38160000, 0x38164000, 0x38168000, 0x3816c000, 0x38170000, 0x38174000,
0x38178000, 0x3817c000, 0x38180000, 0x38184000, 0x38188000, 0x3818c000,
0x38190000, 0x38194000, 0x38198000, 0x3819c000, 0x381a0000, 0x381a4000,
0x381a8000, 0x381ac000, 0x381b0000, 0x381b4000, 0x381b8000, 0x381bc000,
0x381c0000, 0x381c4000, 0x381c8000, 0x381cc000, 0x381d0000, 0x381d4000,
0x381d8000, 0x381dc000, 0x381e0000, 0x381e4000, 0x381e8000, 0x381ec000,
0x381f0000, 0x381f4000, 0x381f8000, 0x381fc000, 0x38200000, 0x38204000,
0x38208000, 0x3820c000, 0x38210000, 0x38214000, 0x38218000, 0x3821c000,
0x38220000, 0x38224000, 0x38228000, 0x3822c000, 0x38230000, 0x38234000,
0x38238000, 0x3823c000, 0x38240000, 0x38244000, 0x38248000, 0x3824c000,
0x38250000, 0x38254000, 0x38258000, 0x3825c000, 0x38260000, 0x38264000,
0x38268000, 0x3826c000, 0x38270000, 0x38274000, 0x38278000, 0x3827c000,
0x38280000, 0x38284000, 0x38288000, 0x3828c000, 0x38290000, 0x38294000,
0x38298000, 0x3829c000, 0x382a0000, 0x382a4000, 0x382a8000, 0x382ac000,
0x382b0000, 0x382b4000, 0x382b8000, 0x382bc000, 0x382c0000, 0x382c4000,
0x382c8000, 0x382cc000, 0x382d0000, 0x382d4000, 0x382d8000, 0x382dc000,
0x382e0000, 0x382e4000, 0x382e8000, 0x382ec000, 0x382f0000, 0x382f4000,
0x382f8000, 0x382fc000, 0x38300000, 0x38304000, 0x38308000, 0x3830c000,
0x38310000, 0x38314000, 0x38318000, 0x3831c000, 0x38320000, 0x38324000,
0x38328000, 0x3832c000, 0x38330000, 0x38334000, 0x38338000, 0x3833c000,
0x38340000, 0x38344000, 0x38348000, 0x3834c000, 0x38350000, 0x38354000,
0x38358000, 0x3835c000, 0x38360000, 0x38364000, 0x38368000, 0x3836c000,
0x38370000, 0x38374000, 0x38378000, 0x3837c000, 0x38380000, 0x38384000,
0x38388000, 0x3838c000, 0x38390000, 0x38394000, 0x38398000, 0x3839c000,
0x383a0000, 0x383a4000, 0x383a8000, 0x383ac000, 0x383b0000, 0x383b4000,
0x383b8000, 0x383bc000, 0x383c0000, 0x383c4000, 0x383c8000, 0x383cc000,
0x383d0000, 0x383d4000, 0x383d8000, 0x383dc000, 0x383e0000, 0x383e4000,
0x383e8000, 0x383ec000, 0x383f0000, 0x383f4000, 0x383f8000, 0x383fc000,
0x38400000, 0x38404000, 0x38408000, 0x3840c000, 0x38410000, 0x38414000,
0x38418000, 0x3841c000, 0x38420000, 0x38424000, 0x38428000, 0x3842c000,
0x38430000, 0x38434000, 0x38438000, 0x3843c000, 0x38440000, 0x38444000,
0x38448000, 0x3844c000, 0x38450000, 0x38454000, 0x38458000, 0x3845c000,
0x38460000, 0x38464000, 0x38468000, 0x3846c000, 0x38470000, 0x38474000,
0x38478000, 0x3847c000, 0x38480000, 0x38484000, 0x38488000, 0x3848c000,
0x38490000, 0x38494000, 0x38498000, 0x3849c000, 0x384a0000, 0x384a4000,
0x384a8000, 0x384ac000, 0x384b0000, 0x384b4000, 0x384b8000, 0x384bc000,
0x384c0000, 0x384c4000, 0x384c8000, 0x384cc000, 0x384d0000, 0x384d4000,
0x384d8000, 0x384dc000, 0x384e0000, 0x384e4000, 0x384e8000, 0x384ec000,
0x384f0000, 0x384f4000, 0x384f8000, 0x384fc000, 0x38500000, 0x38504000,
0x38508000, 0x3850c000, 0x38510000, 0x38514000, 0x38518000, 0x3851c000,
0x38520000, 0x38524000, 0x38528000, 0x3852c000, 0x38530000, 0x38534000,
0x38538000, 0x3853c000, 0x38540000, 0x38544000, 0x38548000, 0x3854c000,
0x38550000, 0x38554000, 0x38558000, 0x3855c000, 0x38560000, 0x38564000,
0x38568000, 0x3856c000, 0x38570000, 0x38574000, 0x38578000, 0x3857c000,
0x38580000, 0x38584000, 0x38588000, 0x3858c000, 0x38590000, 0x38594000,
0x38598000, 0x3859c000, 0x385a0000, 0x385a4000, 0x385a8000, 0x385ac000,
0x385b0000, 0x385b4000, 0x385b8000, 0x385bc000, 0x385c0000, 0x385c4000,
0x385c8000, 0x385cc000, 0x385d0000, 0x385d4000, 0x385d8000, 0x385dc000,
0x385e0000, 0x385e4000, 0x385e8000, 0x385ec000, 0x385f0000, 0x385f4000,
0x385f8000, 0x385fc000, 0x38600000, 0x38604000, 0x38608000, 0x3860c000,
0x38610000, 0x38614000, 0x38618000, 0x3861c000, 0x38620000, 0x38624000,
0x38628000, 0x3862c000, 0x38630000, 0x38634000, 0x38638000, 0x3863c000,
0x38640000, 0x38644000, 0x38648000, 0x3864c000, 0x38650000, 0x38654000,
0x38658000, 0x3865c000, 0x38660000, 0x38664000, 0x38668000, 0x3866c000,
0x38670000, 0x38674000, 0x38678000, 0x3867c000, 0x38680000, 0x38684000,
0x38688000, 0x3868c000, 0x38690000, 0x38694000, 0x38698000, 0x3869c000,
0x386a0000, 0x386a4000, 0x386a8000, 0x386ac000, 0x386b0000, 0x386b4000,
0x386b8000, 0x386bc000, 0x386c0000, 0x386c4000, 0x386c8000, 0x386cc000,
0x386d0000, 0x386d4000, 0x386d8000, 0x386dc000, 0x386e0000, 0x386e4000,
0x386e8000, 0x386ec000, 0x386f0000, 0x386f4000, 0x386f8000, 0x386fc000,
0x38700000, 0x38704000, 0x38708000, 0x3870c000, 0x38710000, 0x38714000,
0x38718000, 0x3871c000, 0x38720000, 0x38724000, 0x38728000, 0x3872c000,
0x38730000, 0x38734000, 0x38738000, 0x3873c000, 0x38740000, 0x38744000,
0x38748000, 0x3874c000, 0x38750000, 0x38754000, 0x38758000, 0x3875c000,
0x38760000, 0x38764000, 0x38768000, 0x3876c000, 0x38770000, 0x38774000,
0x38778000, 0x3877c000, 0x38780000, 0x38784000, 0x38788000, 0x3878c000,
0x38790000, 0x38794000, 0x38798000, 0x3879c000, 0x387a0000, 0x387a4000,
0x387a8000, 0x387ac000, 0x387b0000, 0x387b4000, 0x387b8000, 0x387bc000,
0x387c0000, 0x387c4000, 0x387c8000, 0x387cc000, 0x387d0000, 0x387d4000,
0x387d8000, 0x387dc000, 0x387e0000, 0x387e4000, 0x387e8000, 0x387ec000,
0x387f0000, 0x387f4000, 0x387f8000, 0x387fc000, 0x38000000, 0x38002000,
0x38004000, 0x38006000, 0x38008000, 0x3800a000, 0x3800c000, 0x3800e000,
0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801a000,
0x3801c000, 0x3801e000, 0x38020000, 0x38022000, 0x38024000, 0x38026000,
0x38028000, 0x3802a000, 0x3802c000, 0x3802e000, 0x38030000, 0x38032000,
0x38034000, 0x38036000, 0x38038000, 0x3803a000, 0x3803c000, 0x3803e000,
0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804a000,
0x3804c000, 0x3804e000, 0x38050000, 0x38052000, 0x38054000, 0x38056000,
0x38058000, 0x3805a000, 0x3805c000, 0x3805e000, 0x38060000, 0x38062000,
0x38064000, 0x38066000, 0x38068000, 0x3806a000, 0x3806c000, 0x3806e000,
0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807a000,
0x3807c000, 0x3807e000, 0x38080000, 0x38082000, 0x38084000, 0x38086000,
0x38088000, 0x3808a000, 0x3808c000, 0x3808e000, 0x38090000, 0x38092000,
0x38094000, 0x38096000, 0x38098000, 0x3809a000, 0x3809c000, 0x3809e000,
0x380a0000, 0x380a2000, 0x380a4000, 0x380a6000, 0x380a8000, 0x380aa000,
0x380ac000, 0x380ae000, 0x380b0000, 0x380b2000, 0x380b4000, 0x380b6000,
0x380b8000, 0x380ba000, 0x380bc000, 0x380be000, 0x380c0000, 0x380c2000,
0x380c4000, 0x380c6000, 0x380c8000, 0x380ca000, 0x380cc000, 0x380ce000,
0x380d0000, 0x380d2000, 0x380d4000, 0x380d6000, 0x380d8000, 0x380da000,
0x380dc000, 0x380de000, 0x380e0000, 0x380e2000, 0x380e4000, 0x380e6000,
0x380e8000, 0x380ea000, 0x380ec000, 0x380ee000, 0x380f0000, 0x380f2000,
0x380f4000, 0x380f6000, 0x380f8000, 0x380fa000, 0x380fc000, 0x380fe000,
0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810a000,
0x3810c000, 0x3810e000, 0x38110000, 0x38112000, 0x38114000, 0x38116000,
0x38118000, 0x3811a000, 0x3811c000, 0x3811e000, 0x38120000, 0x38122000,
0x38124000, 0x38126000, 0x38128000, 0x3812a000, 0x3812c000, 0x3812e000,
0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813a000,
0x3813c000, 0x3813e000, 0x38140000, 0x38142000, 0x38144000, 0x38146000,
0x38148000, 0x3814a000, 0x3814c000, 0x3814e000, 0x38150000, 0x38152000,
0x38154000, 0x38156000, 0x38158000, 0x3815a000, 0x3815c000, 0x3815e000,
0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816a000,
0x3816c000, 0x3816e000, 0x38170000, 0x38172000, 0x38174000, 0x38176000,
0x38178000, 0x3817a000, 0x3817c000, 0x3817e000, 0x38180000, 0x38182000,
0x38184000, 0x38186000, 0x38188000, 0x3818a000, 0x3818c000, 0x3818e000,
0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819a000,
0x3819c000, 0x3819e000, 0x381a0000, 0x381a2000, 0x381a4000, 0x381a6000,
0x381a8000, 0x381aa000, 0x381ac000, 0x381ae000, 0x381b0000, 0x381b2000,
0x381b4000, 0x381b6000, 0x381b8000, 0x381ba000, 0x381bc000, 0x381be000,
0x381c0000, 0x381c2000, 0x381c4000, 0x381c6000, 0x381c8000, 0x381ca000,
0x381cc000, 0x381ce000, 0x381d0000, 0x381d2000, 0x381d4000, 0x381d6000,
0x381d8000, 0x381da000, 0x381dc000, 0x381de000, 0x381e0000, 0x381e2000,
0x381e4000, 0x381e6000, 0x381e8000, 0x381ea000, 0x381ec000, 0x381ee000,
0x381f0000, 0x381f2000, 0x381f4000, 0x381f6000, 0x381f8000, 0x381fa000,
0x381fc000, 0x381fe000, 0x38200000, 0x38202000, 0x38204000, 0x38206000,
0x38208000, 0x3820a000, 0x3820c000, 0x3820e000, 0x38210000, 0x38212000,
0x38214000, 0x38216000, 0x38218000, 0x3821a000, 0x3821c000, 0x3821e000,
0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822a000,
0x3822c000, 0x3822e000, 0x38230000, 0x38232000, 0x38234000, 0x38236000,
0x38238000, 0x3823a000, 0x3823c000, 0x3823e000, 0x38240000, 0x38242000,
0x38244000, 0x38246000, 0x38248000, 0x3824a000, 0x3824c000, 0x3824e000,
0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825a000,
0x3825c000, 0x3825e000, 0x38260000, 0x38262000, 0x38264000, 0x38266000,
0x38268000, 0x3826a000, 0x3826c000, 0x3826e000, 0x38270000, 0x38272000,
0x38274000, 0x38276000, 0x38278000, 0x3827a000, 0x3827c000, 0x3827e000,
0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828a000,
0x3828c000, 0x3828e000, 0x38290000, 0x38292000, 0x38294000, 0x38296000,
0x38298000, 0x3829a000, 0x3829c000, 0x3829e000, 0x382a0000, 0x382a2000,
0x382a4000, 0x382a6000, 0x382a8000, 0x382aa000, 0x382ac000, 0x382ae000,
0x382b0000, 0x382b2000, 0x382b4000, 0x382b6000, 0x382b8000, 0x382ba000,
0x382bc000, 0x382be000, 0x382c0000, 0x382c2000, 0x382c4000, 0x382c6000,
0x382c8000, 0x382ca000, 0x382cc000, 0x382ce000, 0x382d0000, 0x382d2000,
0x382d4000, 0x382d6000, 0x382d8000, 0x382da000, 0x382dc000, 0x382de000,
0x382e0000, 0x382e2000, 0x382e4000, 0x382e6000, 0x382e8000, 0x382ea000,
0x382ec000, 0x382ee000, 0x382f0000, 0x382f2000, 0x382f4000, 0x382f6000,
0x382f8000, 0x382fa000, 0x382fc000, 0x382fe000, 0x38300000, 0x38302000,
0x38304000, 0x38306000, 0x38308000, 0x3830a000, 0x3830c000, 0x3830e000,
0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831a000,
0x3831c000, 0x3831e000, 0x38320000, 0x38322000, 0x38324000, 0x38326000,
0x38328000, 0x3832a000, 0x3832c000, 0x3832e000, 0x38330000, 0x38332000,
0x38334000, 0x38336000, 0x38338000, 0x3833a000, 0x3833c000, 0x3833e000,
0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834a000,
0x3834c000, 0x3834e000, 0x38350000, 0x38352000, 0x38354000, 0x38356000,
0x38358000, 0x3835a000, 0x3835c000, 0x3835e000, 0x38360000, 0x38362000,
0x38364000, 0x38366000, 0x38368000, 0x3836a000, 0x3836c000, 0x3836e000,
0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837a000,
0x3837c000, 0x3837e000, 0x38380000, 0x38382000, 0x38384000, 0x38386000,
0x38388000, 0x3838a000, 0x3838c000, 0x3838e000, 0x38390000, 0x38392000,
0x38394000, 0x38396000, 0x38398000, 0x3839a000, 0x3839c000, 0x3839e000,
0x383a0000, 0x383a2000, 0x383a4000, 0x383a6000, 0x383a8000, 0x383aa000,
0x383ac000, 0x383ae000, 0x383b0000, 0x383b2000, 0x383b4000, 0x383b6000,
0x383b8000, 0x383ba000, 0x383bc000, 0x383be000, 0x383c0000, 0x383c2000,
0x383c4000, 0x383c6000, 0x383c8000, 0x383ca000, 0x383cc000, 0x383ce000,
0x383d0000, 0x383d2000, 0x383d4000, 0x383d6000, 0x383d8000, 0x383da000,
0x383dc000, 0x383de000, 0x383e0000, 0x383e2000, 0x383e4000, 0x383e6000,
0x383e8000, 0x383ea000, 0x383ec000, 0x383ee000, 0x383f0000, 0x383f2000,
0x383f4000, 0x383f6000, 0x383f8000, 0x383fa000, 0x383fc000, 0x383fe000,
0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840a000,
0x3840c000, 0x3840e000, 0x38410000, 0x38412000, 0x38414000, 0x38416000,
0x38418000, 0x3841a000, 0x3841c000, 0x3841e000, 0x38420000, 0x38422000,
0x38424000, 0x38426000, 0x38428000, 0x3842a000, 0x3842c000, 0x3842e000,
0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843a000,
0x3843c000, 0x3843e000, 0x38440000, 0x38442000, 0x38444000, 0x38446000,
0x38448000, 0x3844a000, 0x3844c000, 0x3844e000, 0x38450000, 0x38452000,
0x38454000, 0x38456000, 0x38458000, 0x3845a000, 0x3845c000, 0x3845e000,
0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846a000,
0x3846c000, 0x3846e000, 0x38470000, 0x38472000, 0x38474000, 0x38476000,
0x38478000, 0x3847a000, 0x3847c000, 0x3847e000, 0x38480000, 0x38482000,
0x38484000, 0x38486000, 0x38488000, 0x3848a000, 0x3848c000, 0x3848e000,
0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849a000,
0x3849c000, 0x3849e000, 0x384a0000, 0x384a2000, 0x384a4000, 0x384a6000,
0x384a8000, 0x384aa000, 0x384ac000, 0x384ae000, 0x384b0000, 0x384b2000,
0x384b4000, 0x384b6000, 0x384b8000, 0x384ba000, 0x384bc000, 0x384be000,
0x384c0000, 0x384c2000, 0x384c4000, 0x384c6000, 0x384c8000, 0x384ca000,
0x384cc000, 0x384ce000, 0x384d0000, 0x384d2000, 0x384d4000, 0x384d6000,
0x384d8000, 0x384da000, 0x384dc000, 0x384de000, 0x384e0000, 0x384e2000,
0x384e4000, 0x384e6000, 0x384e8000, 0x384ea000, 0x384ec000, 0x384ee000,
0x384f0000, 0x384f2000, 0x384f4000, 0x384f6000, 0x384f8000, 0x384fa000,
0x384fc000, 0x384fe000, 0x38500000, 0x38502000, 0x38504000, 0x38506000,
0x38508000, 0x3850a000, 0x3850c000, 0x3850e000, 0x38510000, 0x38512000,
0x38514000, 0x38516000, 0x38518000, 0x3851a000, 0x3851c000, 0x3851e000,
0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852a000,
0x3852c000, 0x3852e000, 0x38530000, 0x38532000, 0x38534000, 0x38536000,
0x38538000, 0x3853a000, 0x3853c000, 0x3853e000, 0x38540000, 0x38542000,
0x38544000, 0x38546000, 0x38548000, 0x3854a000, 0x3854c000, 0x3854e000,
0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855a000,
0x3855c000, 0x3855e000, 0x38560000, 0x38562000, 0x38564000, 0x38566000,
0x38568000, 0x3856a000, 0x3856c000, 0x3856e000, 0x38570000, 0x38572000,
0x38574000, 0x38576000, 0x38578000, 0x3857a000, 0x3857c000, 0x3857e000,
0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858a000,
0x3858c000, 0x3858e000, 0x38590000, 0x38592000, 0x38594000, 0x38596000,
0x38598000, 0x3859a000, 0x3859c000, 0x3859e000, 0x385a0000, 0x385a2000,
0x385a4000, 0x385a6000, 0x385a8000, 0x385aa000, 0x385ac000, 0x385ae000,
0x385b0000, 0x385b2000, 0x385b4000, 0x385b6000, 0x385b8000, 0x385ba000,
0x385bc000, 0x385be000, 0x385c0000, 0x385c2000, 0x385c4000, 0x385c6000,
0x385c8000, 0x385ca000, 0x385cc000, 0x385ce000, 0x385d0000, 0x385d2000,
0x385d4000, 0x385d6000, 0x385d8000, 0x385da000, 0x385dc000, 0x385de000,
0x385e0000, 0x385e2000, 0x385e4000, 0x385e6000, 0x385e8000, 0x385ea000,
0x385ec000, 0x385ee000, 0x385f0000, 0x385f2000, 0x385f4000, 0x385f6000,
0x385f8000, 0x385fa000, 0x385fc000, 0x385fe000, 0x38600000, 0x38602000,
0x38604000, 0x38606000, 0x38608000, 0x3860a000, 0x3860c000, 0x3860e000,
0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861a000,
0x3861c000, 0x3861e000, 0x38620000, 0x38622000, 0x38624000, 0x38626000,
0x38628000, 0x3862a000, 0x3862c000, 0x3862e000, 0x38630000, 0x38632000,
0x38634000, 0x38636000, 0x38638000, 0x3863a000, 0x3863c000, 0x3863e000,
0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864a000,
0x3864c000, 0x3864e000, 0x38650000, 0x38652000, 0x38654000, 0x38656000,
0x38658000, 0x3865a000, 0x3865c000, 0x3865e000, 0x38660000, 0x38662000,
0x38664000, 0x38666000, 0x38668000, 0x3866a000, 0x3866c000, 0x3866e000,
0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867a000,
0x3867c000, 0x3867e000, 0x38680000, 0x38682000, 0x38684000, 0x38686000,
0x38688000, 0x3868a000, 0x3868c000, 0x3868e000, 0x38690000, 0x38692000,
0x38694000, 0x38696000, 0x38698000, 0x3869a000, 0x3869c000, 0x3869e000,
0x386a0000, 0x386a2000, 0x386a4000, 0x386a6000, 0x386a8000, 0x386aa000,
0x386ac000, 0x386ae000, 0x386b0000, 0x386b2000, 0x386b4000, 0x386b6000,
0x386b8000, 0x386ba000, 0x386bc000, 0x386be000, 0x386c0000, 0x386c2000,
0x386c4000, 0x386c6000, 0x386c8000, 0x386ca000, 0x386cc000, 0x386ce000,
0x386d0000, 0x386d2000, 0x386d4000, 0x386d6000, 0x386d8000, 0x386da000,
0x386dc000, 0x386de000, 0x386e0000, 0x386e2000, 0x386e4000, 0x386e6000,
0x386e8000, 0x386ea000, 0x386ec000, 0x386ee000, 0x386f0000, 0x386f2000,
0x386f4000, 0x386f6000, 0x386f8000, 0x386fa000, 0x386fc000, 0x386fe000,
0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870a000,
0x3870c000, 0x3870e000, 0x38710000, 0x38712000, 0x38714000, 0x38716000,
0x38718000, 0x3871a000, 0x3871c000, 0x3871e000, 0x38720000, 0x38722000,
0x38724000, 0x38726000, 0x38728000, 0x3872a000, 0x3872c000, 0x3872e000,
0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873a000,
0x3873c000, 0x3873e000, 0x38740000, 0x38742000, 0x38744000, 0x38746000,
0x38748000, 0x3874a000, 0x3874c000, 0x3874e000, 0x38750000, 0x38752000,
0x38754000, 0x38756000, 0x38758000, 0x3875a000, 0x3875c000, 0x3875e000,
0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876a000,
0x3876c000, 0x3876e000, 0x38770000, 0x38772000, 0x38774000, 0x38776000,
0x38778000, 0x3877a000, 0x3877c000, 0x3877e000, 0x38780000, 0x38782000,
0x38784000, 0x38786000, 0x38788000, 0x3878a000, 0x3878c000, 0x3878e000,
0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879a000,
0x3879c000, 0x3879e000, 0x387a0000, 0x387a2000, 0x387a4000, 0x387a6000,
0x387a8000, 0x387aa000, 0x387ac000, 0x387ae000, 0x387b0000, 0x387b2000,
0x387b4000, 0x387b6000, 0x387b8000, 0x387ba000, 0x387bc000, 0x387be000,
0x387c0000, 0x387c2000, 0x387c4000, 0x387c6000, 0x387c8000, 0x387ca000,
0x387cc000, 0x387ce000, 0x387d0000, 0x387d2000, 0x387d4000, 0x387d6000,
0x387d8000, 0x387da000, 0x387dc000, 0x387de000, 0x387e0000, 0x387e2000,
0x387e4000, 0x387e6000, 0x387e8000, 0x387ea000, 0x387ec000, 0x387ee000,
0x387f0000, 0x387f2000, 0x387f4000, 0x387f6000, 0x387f8000, 0x387fa000,
0x387fc000, 0x387fe000
};
/* half->float: per-exponent offset into m__mantissa (one entry per
   sign+exponent value of the half, i.e. h >> 10). Used by m_half2float. */
static uint16_t m__offset[64] = {
0x0000, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0000, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400, 0x0400, 0x0400,
0x0400, 0x0400, 0x0400, 0x0400
};
/* half->float: float32 sign/exponent bit pattern per half sign+exponent
   value (h >> 10); added to the m__mantissa entry in m_half2float. */
static uint32_t m__exponent[64] = {
0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000,
0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000,
0x06000000, 0x06800000, 0x07000000, 0x07800000, 0x08000000, 0x08800000,
0x09000000, 0x09800000, 0x0a000000, 0x0a800000, 0x0b000000, 0x0b800000,
0x0c000000, 0x0c800000, 0x0d000000, 0x0d800000, 0x0e000000, 0x0e800000,
0x0f000000, 0x47800000, 0x80000000, 0x80800000, 0x81000000, 0x81800000,
0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000,
0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000,
0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8a000000, 0x8a800000,
0x8b000000, 0x8b800000, 0x8c000000, 0x8c800000, 0x8d000000, 0x8d800000,
0x8e000000, 0x8e800000, 0x8f000000, 0xc7800000
};
/* float->half: base half bit pattern indexed by the top 9 bits of the
   float's bit representation (sign + 8 exponent bits); combined with a
   shifted mantissa (see m__shift) in m_float2half. */
static uint16_t m__base[512] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040,
0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x0c00, 0x1000, 0x1400, 0x1800, 0x1c00,
0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00, 0x4000, 0x4400,
0x4800, 0x4c00, 0x5000, 0x5400, 0x5800, 0x5c00, 0x6000, 0x6400, 0x6800, 0x6c00,
0x7000, 0x7400, 0x7800, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00,
0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x7c00, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8001,
0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100, 0x8200, 0x8400,
0x8800, 0x8c00, 0x9000, 0x9400, 0x9800, 0x9c00, 0xa000, 0xa400, 0xa800, 0xac00,
0xb000, 0xb400, 0xb800, 0xbc00, 0xc000, 0xc400, 0xc800, 0xcc00, 0xd000, 0xd400,
0xd800, 0xdc00, 0xe000, 0xe400, 0xe800, 0xec00, 0xf000, 0xf400, 0xf800, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00, 0xfc00,
0xfc00, 0xfc00
};
/* float->half: right-shift applied to the float's 23 mantissa bits,
   indexed like m__base by the top 9 bits of the float's bit pattern.
   Used by m_float2half. */
static uint8_t m__shift[512] = {
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x17,
0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x0d, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13,
0x12, 0x11, 0x10, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x0d
};
MIAPI float m_half2float(uint16_t h)
{
   /* Table-driven IEEE 754 half -> float conversion
      ("Fast Half Float Conversions", Jeroen van der Zijp). */
   union {
      uint32_t bits;
      float value;
   } conv;
   int e = h >> 10; /* sign + 5 exponent bits select the table row */
   conv.bits = m__mantissa[m__offset[e] + (h & 0x3ff)] + m__exponent[e];
   return conv.value;
}
MIAPI uint16_t m_float2half(float flt)
{
    /* Encode a single-precision float as an IEEE 754 half using the
       precomputed base/shift lookup tables, indexed by sign+exponent. */
    union {
        uint32_t bits;
        float value;
    } input;
    uint32_t raw, idx;
    input.value = flt;
    raw = input.bits;
    idx = (raw >> 23) & 0x1ff; /* sign bit + 8 exponent bits */
    return (uint16_t)((uint32_t)m__base[idx] + ((raw & 0x007fffff) >> m__shift[idx]));
}
/* Return the size in bytes of one channel sample of the given pixel type,
 * or 0 (after asserting) for an unknown type id.
 * Note: the previous version had an unreachable `break` after every
 * `return`; they have been removed (dead code). */
MIAPI int m_type_sizeof(char type)
{
    switch (type) {
    case M_BYTE:
    case M_UBYTE:
        return sizeof(uint8_t);
    case M_SHORT:
    case M_USHORT:
    case M_HALF:
        return sizeof(uint16_t);
    case M_BOOL:
    case M_INT:
    case M_UINT:
        return sizeof(uint32_t);
    case M_FLOAT:
        return sizeof(float);
    case M_DOUBLE:
        return sizeof(double);
    default:
        assert(0); /* unsupported type id */
        return 0;
    }
}
/* Allocate (or lazily reuse) pixel storage for an image of the given
 * type, dimensions and component count. If the image already holds a
 * buffer with identical type/width/height/comp, the existing allocation
 * and its contents are kept untouched.
 * NOTE(review): a failed malloc only prints a message; the struct is
 * still populated with a NULL data pointer and a non-zero size, so a
 * later access will crash — confirm whether callers can tolerate this. */
MIAPI void m_image_create(struct m_image *image, char type, int width, int height, int comp)
{
    int size = width * height * comp; /* sample count, not bytes */
    assert(size > 0);
    /* already allocated with the same geometry: nothing to do */
    if (image->data != 0 && type == image->type && width == image->width && height == image->height && comp == image->comp)
        return;
    M_SAFE_FREE(image->data);
    image->data = malloc(size * m_type_sizeof(type));
    if( !image->data )
        printf("BAD ALLOC:m_image_create\n");
    image->type = type;
    image->width = width;
    image->height = height;
    image->comp = comp;
    image->size = size;
}
/* Free the pixel buffer (if any) and zero every field, leaving the struct
 * equivalent to M_IMAGE_IDENTITY(). Safe to call on an already-empty image. */
MIAPI void m_image_destroy(struct m_image *image)
{
    M_SAFE_FREE(image->data);
    memset(image, 0, sizeof(struct m_image));
}
/* Deep-copy src into dest (dest is resized/retyped as needed).
 * Uses m_type_sizeof() for the per-sample byte width, so a single memcpy
 * covers every supported type — including M_BOOL and M_DOUBLE, which the
 * previous per-type switch rejected with assert(0) even though
 * m_type_sizeof() and m_image_create() both support them. */
MIAPI void m_image_copy(struct m_image *dest, const struct m_image *src)
{
    m_image_create(dest, src->type, src->width, src->height, src->comp);
    memcpy(dest->data, src->data, (size_t)dest->size * m_type_sizeof(dest->type));
}
/* Copy the sub-rectangle (x, y, w, h) of src into dest. The rectangle is
 * clipped against src's bounds and dest is resized to the clipped size.
 * dest == src is handled through a temporary copy.
 * NOTE(review): M_DOUBLE images fall into the assert(0) default branch. */
MIAPI void m_image_copy_sub_image(struct m_image *dest, const struct m_image *src, int x, int y, int w, int h)
{
/* per-type kernel: row-by-row memcpy of the clipped window; minx/miny,
   sstep (source row stride) and dstep (dest row stride) come from the
   enclosing scope */
#define M_COPY_SUBI(T)\
{\
T *sData = (T *)src->data + (miny * src->width + minx) * comp;\
T *dData = (T *)dest->data;\
int y;\
for (y = miny; y <= maxy; y++) {\
memcpy(dData, sData, dstep * sizeof(T));\
dData += dstep;\
sData += sstep;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_copy_sub_image(dest, &tmp, x, y, w, h);
        m_image_destroy(&tmp);
    }
    else {
        int comp = src->comp;
        /* clip the requested rectangle to the source bounds */
        int minx = M_MAX(0, x);
        int miny = M_MAX(0, y);
        int maxx = M_CLAMP(x + w - 1, 0, src->width - 1);
        int maxy = M_CLAMP(y + h - 1, 0, src->height - 1);
        int dwidth = 1 + maxx - minx;
        int dheight = 1 + maxy - miny;
        int sstep = src->width * comp;
        int dstep = dwidth * comp;
        m_image_create(dest, src->type, dwidth, dheight, src->comp);
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_COPY_SUBI(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_COPY_SUBI(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_COPY_SUBI(uint32_t);
            break;
        case M_FLOAT:
            M_COPY_SUBI(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_COPY_SUBI
}
MIAPI void m_image_ubyte_to_float(struct m_image *dest, const struct m_image *src)
{
    /* Convert an 8-bit image to float, mapping [0, 255] onto [0, 1]. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_ubyte_to_float(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const uint8_t *in;
        float *out;
        const float scale = 1.0f / 255.0f;
        int k;
        m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
        in = (uint8_t *)src->data;
        out = (float *)dest->data;
        for (k = 0; k < src->size; k++)
            out[k] = (float)in[k] * scale;
    }
}
MIAPI void m_image_ushort_to_float(struct m_image *dest, const struct m_image *src)
{
    /* Convert a 16-bit image to float, mapping [0, 65535] onto [0, 1]. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_ushort_to_float(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const uint16_t *in;
        float *out;
        const float scale = 1.0f / 65535.0f;
        int k;
        m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
        in = (uint16_t *)src->data;
        out = (float *)dest->data;
        for (k = 0; k < src->size; k++)
            out[k] = (float)in[k] * scale;
    }
}
MIAPI void m_image_half_to_float(struct m_image *dest, const struct m_image *src)
{
    /* Expand a half-float image to full single precision. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_half_to_float(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const uint16_t *in;
        float *out;
        int k;
        m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
        in = (uint16_t *)src->data;
        out = (float *)dest->data;
        for (k = 0; k < src->size; k++)
            out[k] = m_half2float(in[k]);
    }
}
MIAPI void m_image_float_to_ubyte(struct m_image *dest, const struct m_image *src)
{
    /* Quantize floats to 8-bit: round to nearest, clamp to [0, 255]. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_float_to_ubyte(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const float *in;
        uint8_t *out;
        int k;
        m_image_create(dest, M_UBYTE, src->width, src->height, src->comp);
        in = (float *)src->data;
        out = (uint8_t *)dest->data;
        for (k = 0; k < src->size; k++) {
            int q = (int)(in[k] * 255.0f + 0.5f);
            out[k] = (uint8_t)M_CLAMP(q, 0, 255);
        }
    }
}
/* Quantize a float image to 16-bit unsigned, clamping to [0, 65535].
 * Fix: round to nearest (+0.5f) instead of truncating, consistent with
 * m_image_float_to_ubyte — truncation biased every sample downward by up
 * to one quantization step. */
MIAPI void m_image_float_to_ushort(struct m_image *dest, const struct m_image *src)
{
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_float_to_ushort(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        float *src_data;
        uint16_t *dest_data;
        int i;
        m_image_create(dest, M_USHORT, src->width, src->height, src->comp);
        src_data = (float *)src->data;
        dest_data = (uint16_t *)dest->data;
        for (i = 0; i < src->size; i++) {
            int x = (int)(src_data[i] * 65535.0f + 0.5f); /* round to nearest */
            dest_data[i] = (uint16_t)M_CLAMP(x, 0, 65535);
        }
    }
}
MIAPI void m_image_float_to_half(struct m_image *dest, const struct m_image *src)
{
    /* Compress a float image to IEEE 754 half precision. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_float_to_half(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const float *in;
        uint16_t *out;
        int k;
        m_image_create(dest, M_HALF, src->width, src->height, src->comp);
        in = (float *)src->data;
        out = (uint16_t *)dest->data;
        for (k = 0; k < src->size; k++)
            out[k] = m_float2half(in[k]);
    }
}
/* Extract channel `c` of src into a single-component image of the same
 * type. Asserts and leaves dest unchanged when `c` is out of range.
 * Fix: the bounds check only rejected c >= comp; a negative `c` read out
 * of bounds. Now both ends are validated. */
MIAPI void m_image_extract_component(struct m_image *dest, const struct m_image *src, int c)
{
/* per-type kernel: copy every comp-th sample, starting at channel c */
#define M_EXTRACT(T)\
{\
T *dest_pixel = (T *)dest->data;\
T *src_pixel = (T *)src->data;\
for (i = 0; i < size; i += comp) {\
(*dest_pixel) = src_pixel[c];\
dest_pixel++;\
src_pixel += comp;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_extract_component(dest, &tmp, c);
        m_image_destroy(&tmp);
    }
    else {
        int width = src->width;
        int height = src->height;
        int comp = src->comp;
        int size = src->size;
        int i;
        if (c < 0 || c >= src->comp) { /* reject out-of-range channel index */
            assert(0);
            return;
        }
        m_image_create(dest, src->type, width, height, 1);
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_EXTRACT(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_EXTRACT(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_EXTRACT(uint32_t);
            break;
        case M_FLOAT:
            M_EXTRACT(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_EXTRACT
}
/* Re-frame src by adding left/top/right/bottom margins (negative values
 * crop). New pixels outside the original image are filled with zeros.
 * All-zero margins degenerate to a plain copy; dest == src goes through
 * a temporary. Asserts if the resulting size is not positive. */
MIAPI void m_image_reframe_zero(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom)
{
/* per-type kernel: walk the destination and either copy the matching
   source pixel or write zeros when outside the source rectangle */
#define M_REFRAME(T)\
{\
T *src_data;\
T *src_pixel;\
T *dest_pixel;\
int c;\
int x, y;\
m_image_create(dest, src->type, width2, height2, comp);\
src_data = (T *)src->data;\
dest_pixel = (T *)dest->data;\
for (y = 0; y < height2; y++) {\
int ys = y - top;\
for (x = 0; x < width2; x++) {\
int xs = x - left;\
if (ys >= 0 && ys < height && xs >= 0 && xs < width) {\
src_pixel = src_data + (ys * width + xs) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
}\
else {\
for (c = 0; c < comp; c++)\
dest_pixel[c] = 0;\
}\
dest_pixel += comp;\
}\
}\
}
    if(left != 0 || top != 0 || right != 0 || bottom != 0) {
        if (dest == src) {
            struct m_image tmp = M_IMAGE_IDENTITY();
            m_image_copy(&tmp, src);
            m_image_reframe_zero(dest, &tmp, left, top, right, bottom);
            m_image_destroy(&tmp);
        }
        else {
            int comp = src->comp;
            int width = src->width;
            int height = src->height;
            int width2 = width + left + right;
            int height2 = height + top + bottom;
            if(width2 > 0 && height2 > 0) {
                switch(src->type) {
                case M_BYTE:
                case M_UBYTE:
                    M_REFRAME(uint8_t);
                    break;
                case M_SHORT:
                case M_USHORT:
                case M_HALF:
                    M_REFRAME(uint16_t);
                    break;
                case M_INT:
                case M_UINT:
                    M_REFRAME(uint32_t);
                    break;
                case M_FLOAT:
                    M_REFRAME(float);
                    break;
                default:
                    assert(0);
                    break;
                }
            }
            else {
                assert(0); /* margins cropped the image away entirely */
            }
        }
    }
    else {
        m_image_copy(dest, src);
    }
#undef M_REFRAME
}
/* Re-frame src by adding left/top/right/bottom margins (negative values
 * crop). Unlike m_image_reframe_zero, new pixels replicate the nearest
 * edge pixel (coordinates are clamped into the source rectangle).
 * All-zero margins degenerate to a plain copy; dest == src goes through
 * a temporary. Asserts if the resulting size is not positive. */
MIAPI void m_image_reframe(struct m_image *dest, const struct m_image *src, int left, int top, int right, int bottom)
{
/* per-type kernel: clamp source coordinates to [0, wm1]/[0, hm1] so the
   border pixels are replicated into the margin */
#define M_REFRAME(T)\
{\
T *src_data;\
T *src_pixel;\
T *dest_pixel;\
int c;\
int x, y;\
m_image_create(dest, src->type, width2, height2, comp);\
src_data = (T *)src->data;\
dest_pixel = (T *)dest->data;\
for (y = 0; y < height2; y++) {\
T *src_y;\
int ys = y - top;\
src_y = src_data + M_CLAMP(ys, 0, hm1) * width * comp;\
for (x = 0; x < width2; x++) {\
int xs = x - left;\
src_pixel = src_y + M_CLAMP(xs, 0, wm1) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}\
}
    if(left != 0 || top != 0 || right != 0 || bottom != 0) {
        if (dest == src) {
            struct m_image tmp = M_IMAGE_IDENTITY();
            m_image_copy(&tmp, src);
            m_image_reframe(dest, &tmp, left, top, right, bottom);
            m_image_destroy(&tmp);
        }
        else {
            int comp = src->comp;
            int width = src->width;
            int height = src->height;
            int width2 = width + left + right;
            int height2 = height + top + bottom;
            int wm1 = width - 1;
            int hm1 = height - 1;
            if(width2 > 0 && height2 > 0) {
                switch(src->type) {
                case M_BYTE:
                case M_UBYTE:
                    M_REFRAME(uint8_t);
                    break;
                case M_SHORT:
                case M_USHORT:
                case M_HALF:
                    M_REFRAME(uint16_t);
                    break;
                case M_INT:
                case M_UINT:
                    M_REFRAME(uint32_t);
                    break;
                case M_FLOAT:
                    M_REFRAME(float);
                    break;
                default:
                    assert(0);
                    break;
                }
            }
            else {
                assert(0); /* margins cropped the image away entirely */
            }
        }
    }
    else {
        m_image_copy(dest, src);
    }
#undef M_REFRAME
}
/* Rotate src by 90 degrees (output dimensions are swapped: dest is
 * height x width). dest == src is handled via a temporary copy. */
MIAPI void m_image_rotate_left(struct m_image *dest, const struct m_image *src)
{
/* per-type kernel: for each output pixel, fetch the corresponding
   rotated source pixel */
#define M_ROTATE_L(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < width; y++)\
for (x = 0; x < height; x++) {\
T *src_pixel = src_data + (x * width + (width - 1 - y)) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_rotate_left(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        int width = src->width;
        int height = src->height;
        int comp = src->comp;
        int x, y, c;
        m_image_create(dest, src->type, height, width, comp); /* swapped dims */
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_ROTATE_L(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_ROTATE_L(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_ROTATE_L(uint32_t);
            break;
        case M_FLOAT:
            M_ROTATE_L(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_ROTATE_L
}
/* Rotate src by 90 degrees in the opposite direction of
 * m_image_rotate_left (output dimensions are swapped: dest is
 * height x width). dest == src is handled via a temporary copy. */
MIAPI void m_image_rotate_right(struct m_image *dest, const struct m_image *src)
{
/* per-type kernel: for each output pixel, fetch the corresponding
   rotated source pixel */
#define M_ROTATE_R(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < width; y++)\
for (x = 0; x < height; x++) {\
T *src_pixel = src_data + ((height - 1 - x) * width + y) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_rotate_right(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        int width = src->width;
        int height = src->height;
        int comp = src->comp;
        int x, y, c;
        m_image_create(dest, src->type, height, width, comp); /* swapped dims */
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_ROTATE_R(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_ROTATE_R(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_ROTATE_R(uint32_t);
            break;
        case M_FLOAT:
            M_ROTATE_R(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_ROTATE_R
}
/* Rotate src by 180 degrees (equivalent to mirroring on both axes).
 * Dimensions are preserved; dest == src is handled via a temporary copy. */
MIAPI void m_image_rotate_180(struct m_image *dest, const struct m_image *src)
{
/* per-type kernel: read source pixels in reverse raster order */
#define M_ROTATE_180(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < height; y++)\
for (x = 0; x < width; x++) {\
T *src_pixel = src_data + ((height - 1 - y) * width + (width - 1 - x)) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_rotate_180(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        int width = src->width;
        int height = src->height;
        int comp = src->comp;
        int x, y, c;
        m_image_create(dest, src->type, width, height, comp);
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_ROTATE_180(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_ROTATE_180(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_ROTATE_180(uint32_t);
            break;
        case M_FLOAT:
            M_ROTATE_180(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_ROTATE_180
}
/* Mirror src horizontally (flip along the vertical axis: column x maps
 * to column width-1-x). dest == src is handled via a temporary copy. */
MIAPI void m_image_mirror_x(struct m_image *dest, const struct m_image *src)
{
/* per-type kernel: read each row right-to-left */
#define M_MIRROR_X(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < height; y++)\
for (x = 0; x < width; x++) {\
T *src_pixel = src_data + (y * width + (width - 1 - x)) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_mirror_x(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        int width = src->width;
        int height = src->height;
        int comp = src->comp;
        int x, y, c;
        m_image_create(dest, src->type, width, height, comp);
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_MIRROR_X(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_MIRROR_X(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_MIRROR_X(uint32_t);
            break;
        case M_FLOAT:
            M_MIRROR_X(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_MIRROR_X
}
/* Mirror src vertically (flip along the horizontal axis: row y maps to
 * row height-1-y). dest == src is handled via a temporary copy. */
MIAPI void m_image_mirror_y(struct m_image *dest, const struct m_image *src)
{
/* per-type kernel: read rows bottom-to-top */
#define M_MIRROR_Y(T)\
{\
T *src_data = (T *)src->data;\
T *dest_pixel = (T *)dest->data;\
for (y = 0; y < height; y++)\
for (x = 0; x < width; x++) {\
T *src_pixel = src_data + ((height - 1 - y) * width + x) * comp;\
for (c = 0; c < comp; c++)\
dest_pixel[c] = src_pixel[c];\
dest_pixel += comp;\
}\
}
    if (dest == src) {
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_mirror_y(dest, &tmp);
        m_image_destroy(&tmp);
    }
    else {
        int width = src->width;
        int height = src->height;
        int comp = src->comp;
        int x, y, c;
        m_image_create(dest, src->type, width, height, comp);
        switch(src->type)
        {
        case M_BYTE:
        case M_UBYTE:
            M_MIRROR_Y(uint8_t);
            break;
        case M_SHORT:
        case M_USHORT:
        case M_HALF:
            M_MIRROR_Y(uint16_t);
            break;
        case M_INT:
        case M_UINT:
            M_MIRROR_Y(uint32_t);
            break;
        case M_FLOAT:
            M_MIRROR_Y(float);
            break;
        default:
            assert(0);
            break;
        }
    }
#undef M_MIRROR_Y
}
MIAPI void m_image_premultiply(struct m_image *dest, const struct m_image *src)
{
    /* Multiply RGB by alpha (premultiplied-alpha form); alpha passes
       through. Requires a 4-component M_FLOAT image. Works in place,
       since m_image_create() keeps the buffer when dest == src. */
    float *out, *in;
    int k;
    assert(src->size > 0 && src->type == M_FLOAT && src->comp == 4);
    m_image_create(dest, M_FLOAT, src->width, src->height, 4);
    out = (float *)dest->data;
    in = (float *)src->data;
    for (k = 0; k < src->size; k += 4, out += 4, in += 4) {
        float a = in[3];
        out[0] = in[0] * a;
        out[1] = in[1] * a;
        out[2] = in[2] * a;
        out[3] = a;
    }
}
MIAPI void m_image_unpremultiply(struct m_image *dest, const struct m_image *src)
{
    /* Inverse of m_image_premultiply: divide RGB by alpha; RGB becomes 0
       where alpha is not positive. Requires a 4-component M_FLOAT image. */
    float *out, *in;
    int k;
    assert(src->size > 0 && src->type == M_FLOAT && src->comp == 4);
    m_image_create(dest, M_FLOAT, src->width, src->height, 4);
    out = (float *)dest->data;
    in = (float *)src->data;
    for (k = 0; k < src->size; k += 4, out += 4, in += 4) {
        float a = in[3];
        if (a > 0.0f) {
            float inv = 1.0f / a;
            out[0] = in[0] * inv;
            out[1] = in[1] * inv;
            out[2] = in[2] * inv;
        }
        else {
            out[0] = 0;
            out[1] = 0;
            out[2] = 0;
        }
        out[3] = a;
    }
}
/* Convert up to the first three channels from sRGB to linear via
 * m_sRGB_to_linear(); extra channels (e.g. alpha) are copied through.
 * When dest == src, m_image_create() keeps the buffer (same geometry),
 * so both pointers alias and the pass-through copy can be skipped. */
MIAPI void m_image_sRGB_to_linear(struct m_image *dest, const struct m_image *src)
{
    float *dest_p, *src_p;
    int i, c, comp3 = M_MIN(src->comp, 3); /* color channels only */
    assert(src->size > 0 && src->type == M_FLOAT);
    m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
    dest_p = (float *)dest->data;
    src_p = (float *)src->data;
    if (dest == src) {
        #pragma omp parallel for schedule(dynamic, 8)
        for (i = 0; i < src->size; i+=src->comp) {
            m_sRGB_to_linear(dest_p+i, src_p+i, comp3);
        }
    }
    else {
        #pragma omp parallel for schedule(dynamic, 8)
        for (i = 0; i < src->size; i+=src->comp) {
            m_sRGB_to_linear(dest_p+i, src_p+i, comp3);
            /* copy remaining channels unchanged */
            for (c = comp3; c < src->comp; c++)
                dest_p[i+c] = src_p[i+c];
        }
    }
}
/* Convert up to the first three channels from linear to sRGB via
 * m_linear_to_sRGB(); extra channels (e.g. alpha) are copied through.
 * Mirror of m_image_sRGB_to_linear, including the in-place aliasing case. */
MIAPI void m_image_linear_to_sRGB(struct m_image *dest, const struct m_image *src)
{
    float *dest_p, *src_p;
    int i, c, comp3 = M_MIN(src->comp, 3); /* color channels only */
    assert(src->size > 0 && src->type == M_FLOAT);
    m_image_create(dest, M_FLOAT, src->width, src->height, src->comp);
    dest_p = (float *)dest->data;
    src_p = (float *)src->data;
    if (dest == src) {
        #pragma omp parallel for schedule(dynamic, 8)
        for (i = 0; i < src->size; i+=src->comp) {
            m_linear_to_sRGB(dest_p+i, src_p+i, comp3);
        }
    }
    else {
        #pragma omp parallel for schedule(dynamic, 8)
        for (i = 0; i < src->size; i+=src->comp) {
            m_linear_to_sRGB(dest_p+i, src_p+i, comp3);
            /* copy remaining channels unchanged */
            for (c = comp3; c < src->comp; c++)
                dest_p[i+c] = src_p[i+c];
        }
    }
}
/* Build a summed-area table (inclusive 2D prefix sum) per channel, in
 * place when dest == src. Pass 1 accumulates left-to-right along each
 * row; pass 2 accumulates each row into the next. src must be M_FLOAT. */
MIAPI void m_image_summed_area(struct m_image *dest, const struct m_image *src)
{
    float *src_pixel;
    float *dest_pixel;
    int width = src->width;
    int height = src->height;
    int comp = src->comp;
    int x, y, c;
    assert(src->size > 0 && src->type == M_FLOAT);
    if (dest != src)
        m_image_copy(dest, src);
    /* horizontal prefix sum: each pixel += previous pixel in its row */
    dest_pixel = (float *)dest->data;
    for (y = 0; y < height; y++) {
        float *prev_pixel = dest_pixel;
        dest_pixel += comp;
        for (x = 1; x < width; x++) {
            for (c = 0; c < comp; c++)
                dest_pixel[c] += prev_pixel[c];
            prev_pixel = dest_pixel;
            dest_pixel += comp;
        }
    }
    /* vertical prefix sum: each row += the (already summed) row above */
    src_pixel = (float *)dest->data;
    dest_pixel = (float *)dest->data + width * comp;
    for (y = 1; y < height; y++)
        for (x = 0; x < width; x++) {
            for (c = 0; c < comp; c++)
                dest_pixel[c] += src_pixel[c];
            src_pixel += comp;
            dest_pixel += comp;
        }
}
/* "Valid"-mode horizontal 1D convolution: the output is 2*radius
 * narrower than src, so callers must pre-pad the source. Rows are
 * processed in parallel with OpenMP. src must be M_FLOAT; `size` is the
 * (odd) kernel length. */
MIAPI void m_image_convolution_h_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
    float *src_data;
    float *dest_data;
    int radius = (size - 1) / 2;
    int width = src->width - radius * 2; /* output shrinks by radius on both sides */
    int height = src->height;
    int comp = src->comp;
    int y, ystep, ystepc;
    assert(src->size > 0 && src->type == M_FLOAT);
    /* create destination images */
    m_image_create(dest, M_FLOAT, width, height, comp);
    /* clear: pixels are accumulated with += below */
    memset(dest->data, 0, dest->size * sizeof(float));
    src_data = (float *)src->data;
    dest_data = (float *)dest->data;
    ystep = width * comp;       /* destination row stride */
    ystepc = src->width * comp; /* source row stride */
    #pragma omp parallel for schedule(dynamic, 8)
    for (y = 0; y < height; y++) {
        float *dest_pixel = dest_data + y * ystep;
        float *src_pixel_y = src_data + y * ystepc;
        int x;
        for (x = 0; x < width; x++) {
            float *src_pixel;
            int i, k;
            src_pixel = src_pixel_y + (x * comp);
            /* apply kernel across the horizontal window */
            for (k = 0; k < size; k++) {
                float v = kernel[k];
                for (i = 0; i < comp; i++)
                    dest_pixel[i] += (*src_pixel++) * v;
            }
            dest_pixel += comp;
        }
    }
}
/* "Valid"-mode vertical 1D convolution: the output is 2*radius shorter
 * than src, so callers must pre-pad the source. Rows are processed in
 * parallel with OpenMP. src must be M_FLOAT; `size` is the (odd) kernel
 * length. */
MIAPI void m_image_convolution_v_raw(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
    float *src_data;
    float *dest_data;
    int radius = (size - 1) / 2;
    int width = src->width;
    int height = src->height - radius * 2; /* output shrinks by radius top and bottom */
    int comp = src->comp;
    int y, ystep;
    assert(src->size > 0 && src->type == M_FLOAT);
    /* create destination images */
    m_image_create(dest, M_FLOAT, width, height, comp);
    /* clear: pixels are accumulated with += below */
    memset(dest->data, 0, dest->size * sizeof(float));
    src_data = (float *)src->data;
    dest_data = (float *)dest->data;
    ystep = width * comp; /* row stride (same for src and dest widths) */
    #pragma omp parallel for schedule(dynamic, 8)
    for (y = 0; y < height; y++) {
        float *dest_pixel = dest_data + y * ystep;
        int x;
        for (x = 0; x < width; x++) {
            float *src_pixel;
            int i, k;
            src_pixel = src_data + (y * width + x) * comp;
            /* apply kernel down the vertical window, one row per tap */
            for (k = 0; k < size; k++) {
                float v = kernel[k];
                for (i = 0; i < comp; i++)
                    dest_pixel[i] += src_pixel[i] * v;
                src_pixel += ystep;
            }
            dest_pixel += comp;
        }
    }
}
/* Horizontal 1D convolution with border renormalization: src is padded
 * with a zero margin, convolved, then every column is rescaled by the
 * inverse of the kernel's response to a unit mask so border pixels keep
 * full intensity. Output has the same width as src. src must be M_FLOAT. */
MIAPI void m_image_convolution_h(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
    struct m_image mask = M_IMAGE_IDENTITY();
    struct m_image tmp = M_IMAGE_IDENTITY();
    float *destp;
    int radius = (size - 1) / 2;
    int x, y, c;
    assert(src->size > 0 && src->type == M_FLOAT);
    /* create source and destination images */
    m_image_reframe_zero(&tmp, src, radius, 0, radius, 0); /* pad with a zero margin */
    m_image_convolution_h_raw(dest, &tmp, kernel, size);
    /* build the normalization mask: a row that is 1 inside the image and
       0 in the padding, convolved with the same kernel */
    m_image_create(&tmp, M_FLOAT, src->width + radius * 2, 1, 1);
    for (x = 0; x < radius; x++)
        ((float *)tmp.data)[x] = 0;
    for (; x < (tmp.width - radius); x++)
        ((float *)tmp.data)[x] = 1;
    for (; x < tmp.width; x++)
        ((float *)tmp.data)[x] = 0;
    m_image_convolution_h_raw(&mask, &tmp, kernel, size);
    /* invert the mask, then rescale each destination column by it */
    for (x = 0; x < mask.width; x++)
        ((float *)mask.data)[x] = 1.0f / ((float *)mask.data)[x];
    destp = (float *)dest->data;
    for (y = 0; y < dest->height; y++) {
        for (x = 0; x < dest->width; x++) {
            for (c = 0; c < dest->comp; c++)
                destp[c] *= ((float *)mask.data)[x];
            destp += dest->comp;
        }
    }
    m_image_destroy(&mask);
    m_image_destroy(&tmp);
}
/* Vertical 1D convolution with border renormalization: src is padded
 * with a zero margin, convolved, then every row is rescaled by the
 * inverse of the kernel's response to a unit mask so border pixels keep
 * full intensity. Output has the same height as src. src must be M_FLOAT. */
MIAPI void m_image_convolution_v(struct m_image *dest, const struct m_image *src, float *kernel, int size)
{
    struct m_image tmp = M_IMAGE_IDENTITY();
    struct m_image mask = M_IMAGE_IDENTITY();
    float *destp;
    int radius = (size - 1) / 2;
    int x, y, c;
    assert(src->size > 0 && src->type == M_FLOAT);
    /* create source and destination images */
    m_image_reframe_zero(&tmp, src, 0, radius, 0, radius); /* pad with a zero margin */
    m_image_convolution_v_raw(dest, &tmp, kernel, size);
    /* build the normalization mask: a column that is 1 inside the image
       and 0 in the padding, convolved with the same kernel */
    m_image_create(&tmp, M_FLOAT, 1, src->height + radius * 2, 1);
    for (y = 0; y < radius; y++)
        ((float *)tmp.data)[y] = 0;
    for (; y < (tmp.height - radius); y++)
        ((float *)tmp.data)[y] = 1;
    for (; y < tmp.height; y++)
        ((float *)tmp.data)[y] = 0;
    m_image_convolution_v_raw(&mask, &tmp, kernel, size);
    /* rescale each destination row by the inverted mask value */
    destp = (float *)dest->data;
    for (y = 0; y < dest->height; y++) {
        float idiv = 1.0f / ((float *)mask.data)[y];
        for (x = 0; x < dest->width; x++) {
            for (c = 0; c < dest->comp; c++)
                destp[c] *= idiv;
            destp += dest->comp;
        }
    }
    m_image_destroy(&mask);
    m_image_destroy(&tmp);
}
/* Separable gaussian blur with radii dx (horizontal) and dy (vertical).
 * The kernel size is derived as round(d / 0.65) * 2 + 1, always odd.
 * When both radii are ~0 the image is simply copied. src must be M_FLOAT. */
MIAPI void m_image_gaussian_blur(struct m_image *dest, const struct m_image *src, float dx, float dy)
{
    struct m_image tmp = M_IMAGE_IDENTITY();
    float *kernelx = NULL, *kernely = NULL;
    int kernelx_size = (int)(dx / 0.65f + 0.5f) * 2 + 1;
    int kernely_size = (int)(dy / 0.65f + 0.5f) * 2 + 1;
    assert(src->size > 0 && src->type == M_FLOAT);
    /* nothing to blur: plain copy and exit */
    if (dx < FLT_EPSILON && dy < FLT_EPSILON) {
        if (dest != src) m_image_copy(dest, src);
        return;
    }
    /* x blur (into tmp when a y pass will follow) */
    if (dx > 0) {
        kernelx = (float *)malloc(kernelx_size * sizeof(float));
        m_gaussian_kernel(kernelx, kernelx_size, dx);
        if (dy > 0)
            m_image_convolution_h(&tmp, src, kernelx, kernelx_size);
        else
            m_image_convolution_h(dest, src, kernelx, kernelx_size);
    }
    /* y blur */
    if (dy > 0) {
        kernely = (float *)malloc(kernely_size * sizeof(float));
        m_gaussian_kernel(kernely, kernely_size, dy);
        if (dx > 0)
            m_image_convolution_v(dest, &tmp, kernely, kernely_size);
        else
            m_image_convolution_v(dest, src, kernely, kernely_size);
    }
    m_image_destroy(&tmp);
    if(kernely) free(kernely);
    if(kernelx) free(kernelx);
}
MIAPI void m_image_grey(struct m_image *dest, const struct m_image *src)
{
    /* Collapse an image with >= 3 channels to a single luminance channel
       using weights 0.3 / 0.5 / 0.2 for the first three channels. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_grey(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const float *in;
        float *out;
        int step = src->comp;
        int total = src->size;
        int k;
        assert(src->size > 0 && src->type == M_FLOAT && src->comp > 2);
        m_image_create(dest, M_FLOAT, src->width, src->height, 1);
        in = (float *)src->data;
        out = (float *)dest->data;
        for (k = 0; k < total; k += step, in += step)
            *out++ = in[0] * 0.3f + in[1] * 0.5f + in[2] * 0.2f;
    }
}
MIAPI void m_image_max(struct m_image *dest, const struct m_image *src)
{
    /* Per-pixel maximum across channels, producing a one-channel image. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_max(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const float *in;
        float *out;
        int step = src->comp;
        int total = src->size;
        int k, ch;
        assert(src->size > 0 && src->type == M_FLOAT);
        m_image_create(dest, M_FLOAT, src->width, src->height, 1);
        in = (float *)src->data;
        out = (float *)dest->data;
        for (k = 0; k < total; k += step, in += step) {
            float best = in[0];
            for (ch = 1; ch < step; ch++)
                best = M_MAX(best, in[ch]);
            *out++ = best;
        }
    }
}
MIAPI void m_image_max_abs(struct m_image *dest, const struct m_image *src)
{
    /* Per-pixel maximum of absolute channel values, producing a
       one-channel image. */
    if (dest == src) {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m_image_max_abs(dest, &tmp);
        m_image_destroy(&tmp);
        return;
    }
    {
        const float *in;
        float *out;
        int step = src->comp;
        int total = src->size;
        int k, ch;
        assert(src->size > 0 && src->type == M_FLOAT);
        m_image_create(dest, M_FLOAT, src->width, src->height, 1);
        in = (float *)src->data;
        out = (float *)dest->data;
        for (k = 0; k < total; k += step, in += step) {
            float best = fabsf(in[0]);
            for (ch = 1; ch < step; ch++)
                best = M_MAX(best, fabsf(in[ch]));
            *out++ = best;
        }
    }
}
/* 3x3 weighted sum at the top-left corner `data`: consecutive rows are
 * `width` floats apart, `kernel` holds 9 row-major weights. */
static float m__convolve_pixel(float *data, int width, float *kernel)
{
    float acc = 0;
    int row, col;
    for (row = 0; row < 3; row++)
        for (col = 0; col < 3; col++)
            acc += data[width * row + col] * kernel[row * 3 + col];
    return acc;
}
/* Per-pixel 3x3 Sobel gradients of a single-channel M_FLOAT image.
 * Output has two channels: [0] = kx response, [1] = ky response.
 * Borders are handled with a clamped 1-pixel reframe.
 * Fix: the identifier `&copy` had been mangled into the HTML entity
 * character `©` at two call sites, which does not compile; restored. */
MIAPI void m_image_sobel(struct m_image *dest, const struct m_image *src)
{
    struct m_image copy = M_IMAGE_IDENTITY();
    float ky[9] = {-1, -2, -1, 0, 0, 0, 1, 2, 1};
    float kx[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
    int width = src->width;
    int height = src->height;
    int w2 = width + 2; /* padded row stride */
    int y;
    assert(src->size > 0 && src->type == M_FLOAT && src->comp == 1);
    /* create source and destination images */
    m_image_reframe(&copy, src, 1, 1, 1, 1); /* apply clamped margin */
    m_image_create(dest, M_FLOAT, width, height, 2);
    #pragma omp parallel for schedule(dynamic, 8)
    for (y = 0; y < height; y++) {
        float * src_pixel = (float*)copy.data + y * w2;
        float * dest_pixel = (float*)dest->data + y * width * 2;
        int x;
        for (x = 0; x < width; x++) {
            dest_pixel[0] = m__convolve_pixel(src_pixel, w2, kx);
            dest_pixel[1] = m__convolve_pixel(src_pixel, w2, ky);
            src_pixel++;
            dest_pixel += 2;
        }
    }
    m_image_destroy(&copy);
}
/* Harris corner response of a single-channel float image:
 * Sobel gradients -> structure tensor (m_sst, 3 channels) -> gaussian
 * smoothing with the given radius -> per-pixel Harris response into a
 * one-channel M_FLOAT image. */
MIAPI void m_image_harris(struct m_image *dest, const struct m_image *src, float radius)
{
    struct m_image tmp1 = M_IMAGE_IDENTITY();
    struct m_image tmp2 = M_IMAGE_IDENTITY();
    /* sobel gradients (2 channels) */
    m_image_sobel(&tmp1, src);
    /* structure tensor */
    m_image_create(&tmp2, M_FLOAT, src->width, src->height, 3);
    m_sst((float *)tmp2.data, (float *)tmp1.data, src->width * src->height);
    /* blur the tensor */
    m_image_copy(&tmp1, &tmp2);
    m_image_gaussian_blur(&tmp2, &tmp1, radius, radius);
    /* harris response */
    m_image_create(dest, M_FLOAT, src->width, src->height, 1);
    m_harris_response((float *)dest->data, (float *)tmp2.data, src->width * src->height);
    m_image_destroy(&tmp1);
    m_image_destroy(&tmp2);
}
/* M_WRITE_PIXEL: store v at (x0, y0) in a single-channel byte buffer
   `dest` with row stride `w` (expected in the enclosing scope). */
#define M_WRITE_PIXEL(dest, x0, y0, v) {*(dest + w * (y0) + (x0)) = v;}
/* M_PUSH_PIXEL: if (x2, y2) matches `ref` and two more entries fit in the
   caller-provided x/y coordinate stack, push it and immediately overwrite
   the pixel with `value` (marking on push prevents duplicate pushes).
   When the stack is full the neighbor is silently skipped. */
#define M_PUSH_PIXEL(x2, y2) if((stack_i+3) < stack_size && m__test_pixel(data, w, h, x2, y2, ref)) {\
stack_i+=2;\
stack[stack_i] = (uint16_t)(x2);\
stack[stack_i+1] = (uint16_t)(y2);\
M_WRITE_PIXEL(data, x2, y2, value);\
}
/* True when (x, y) lies inside the w*h image and the pixel equals ref. */
static int m__test_pixel(uint8_t *src, int w, int h, int x, int y, uint8_t ref)
{
    if (x < 0 || x >= w || y < 0 || y >= h)
        return 0;
    return src[w * y + x] == ref;
}
/* Stack-based 4-connected flood fill on an M_UBYTE image: pixels equal
 * to `ref` reachable from (x, y) are rewritten to `value`. `stack` is
 * caller-provided scratch of `stack_size` uint16_t entries holding
 * (x, y) pairs; when it fills up further neighbors are silently dropped.
 * Returns 0 if the seed pixel does not match `ref`, 1 otherwise. */
MIAPI int m_image_floodfill_4x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size)
{
    uint8_t *data = (uint8_t *)dest->data;
    int w = dest->width;
    int h = dest->height;
    int stack_i = 0;
    assert(dest->size > 0 && dest->type == M_UBYTE);
    if(! m__test_pixel(data, w, h, x, y, ref))
        return 0;
    /* seed: push and mark */
    stack[0] = (uint16_t)x;
    stack[1] = (uint16_t)y;
    M_WRITE_PIXEL(data, x, y, value);
    while (stack_i >= 0) {
        x = stack[stack_i];
        y = stack[stack_i+1];
        stack_i-=2; /* pop */
        M_PUSH_PIXEL(x + 1, y)
        M_PUSH_PIXEL(x - 1, y)
        M_PUSH_PIXEL(x, y + 1)
        M_PUSH_PIXEL(x, y - 1)
    }
    return 1;
}
/* Stack-based 8-connected flood fill on an M_UBYTE image (diagonals
 * included): pixels equal to `ref` reachable from (x, y) are rewritten
 * to `value`. `stack` is caller-provided scratch of `stack_size`
 * uint16_t entries holding (x, y) pairs; when it fills up further
 * neighbors are silently dropped. Returns 0 if the seed pixel does not
 * match `ref`, 1 otherwise. */
MIAPI int m_image_floodfill_8x(struct m_image *dest, int x, int y, uint8_t ref, uint8_t value, uint16_t *stack, int stack_size)
{
    uint8_t *data = (uint8_t *)dest->data;
    int w = dest->width;
    int h = dest->height;
    int stack_i = 0;
    assert(dest->size > 0 && dest->type == M_UBYTE);
    if(! m__test_pixel(data, w, h, x, y, ref))
        return 0;
    /* seed: push and mark */
    stack[0] = (uint16_t)x;
    stack[1] = (uint16_t)y;
    M_WRITE_PIXEL(data, x, y, value);
    while (stack_i >= 0) {
        x = stack[stack_i];
        y = stack[stack_i+1];
        stack_i-=2; /* pop */
        M_PUSH_PIXEL(x + 1, y)
        M_PUSH_PIXEL(x - 1, y)
        M_PUSH_PIXEL(x, y + 1)
        M_PUSH_PIXEL(x, y - 1)
        M_PUSH_PIXEL(x + 1, y + 1)
        M_PUSH_PIXEL(x + 1, y - 1)
        M_PUSH_PIXEL(x - 1, y + 1)
        M_PUSH_PIXEL(x - 1, y - 1)
    }
    return 1;
}
#undef M_WRITE_PIXEL
#undef M_PUSH_PIXEL
/* Shared morphology kernel: for every pixel equal to `ref` whose
 * 4-connected neighborhood contains a different value, write `value`
 * into dest (border neighbors are treated as equal to the center).
 * With copy != 0 dest starts as a copy of src, otherwise zeroed.
 * Output is always a single-channel M_UBYTE image. */
static void m__dilate_erode(struct m_image *dest, const struct m_image *src, uint8_t ref, uint8_t value, int copy)
{
    uint8_t *src_data = (uint8_t *)src->data;
    uint8_t *src_pixel = src_data;
    uint8_t *dest_pixel;
    int w = src->width;
    int h = src->height;
    int y;
    assert(src->size > 0 && src->type == M_UBYTE);
    m_image_create(dest, M_UBYTE, w, h, 1);
    dest_pixel = (uint8_t *)dest->data;
    if (copy)
        memcpy(dest_pixel, src_data, dest->size * sizeof(char));
    else
        memset(dest_pixel, 0, dest->size * sizeof(char));
    for (y=0; y<h; y++) {
        int x;
        for (x=0; x<w; x++) {
            uint8_t c1, c2, c3, c4, c5;
            c1 = *src_pixel;
            if (c1 == ref) {
                /* 4-connected neighbors, clamped to the center at borders */
                c2 = x > 0 ? *(src_data + y * w + (x - 1)) : c1;
                c3 = y > 0 ? *(src_data + (y - 1) * w + x) : c1;
                c4 = (x + 1) < w ? *(src_data + y * w + x + 1) : c1;
                c5 = (y + 1) < h ? *(src_data + (y + 1) * w + x) : c1;
                if (c2 != c1 || c3 != c1 || c4 != c1 || c5 != c1)
                    *dest_pixel = value;
            }
            src_pixel++;
            dest_pixel++;
        }
    }
}
MIAPI void m_image_dilate(struct m_image *dest, const struct m_image *src)
{
    /* Morphological dilation of an M_UBYTE mask: zero pixels adjacent
       (4-connected) to a non-zero pixel become 255 on top of a copy. */
    if (dest != src) {
        m__dilate_erode(dest, src, 0, 255, 1);
        return;
    }
    {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m__dilate_erode(dest, &tmp, 0, 255, 1);
        m_image_destroy(&tmp);
    }
}
MIAPI void m_image_erode(struct m_image *dest, const struct m_image *src)
{
    /* Morphological erosion of an M_UBYTE mask: 255 pixels adjacent
       (4-connected) to a non-255 pixel become 0 on top of a copy. */
    if (dest != src) {
        m__dilate_erode(dest, src, 255, 0, 1);
        return;
    }
    {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m__dilate_erode(dest, &tmp, 255, 0, 1);
        m_image_destroy(&tmp);
    }
}
MIAPI void m_image_edge_4x(struct m_image *dest, const struct m_image *src, uint8_t ref)
{
    /* Edge detection on an M_UBYTE mask: output is zeroed, then pixels
       equal to `ref` with a differing 4-connected neighbor become 255. */
    if (dest != src) {
        m__dilate_erode(dest, src, ref, 255, 0);
        return;
    }
    {
        /* in-place call: route through a temporary copy */
        struct m_image tmp = M_IMAGE_IDENTITY();
        m_image_copy(&tmp, src);
        m__dilate_erode(dest, &tmp, ref, 255, 0);
        m_image_destroy(&tmp);
    }
}
/* Following C code from the article
"Efficient Binary Image Thinning using Neighborhood Maps"
by Joseph M. Cychosz, in "Graphics Gems IV", Academic Press, 1994
Thins the image using Rosenfeld's parallel thinning algorithm.
*/
/* Direction m__masks:
N S W E
*/
/* Octal bit masks selecting the N, S, W, E bits of the 9-bit neighborhood map. */
static int m__masks[] = {0200, 0002, 0040, 0010};
/* True if pixel neighbor map indicates the pixel is 8-simple and
not an end point and thus can be deleted. The neighborhood
map is defined as an integer of bits abcdefghi with a non-zero
bit representing a non-zero pixel. The bit assignment for the
neighborhood is:
a b c
d e f
g h i
*/
/* Deletion lookup table, indexed by the 9-bit neighborhood map whose bit
   layout is documented above: entry is 1 when the centre pixel is
   8-simple and not an end point, i.e. removable without changing the
   image's connectivity. */
static uint8_t m__delete_map[512] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
/* In-place thinning (skeletonization) of a binary M_UBYTE image using
   Rosenfeld's parallel thinning algorithm, after Cychosz, "Efficient
   Binary Image Thinning using Neighborhood Maps", Graphics Gems IV.
   Non-zero pixels are foreground; deleted pixels are set to 0. Runs
   four directional sub-passes (N, S, W, E) per iteration until a full
   iteration deletes nothing. */
MIAPI void m_image_thin(struct m_image *dest)
{
   uint8_t *data;    /* image data */
   uint8_t **ip;     /* scanline pointers, ip[y][x] */
   uint8_t *qb;      /* Neighborhood maps of previous scanline */
   int xsize, ysize; /* Image resolution */
   int x, y;         /* Pixel location */
   int i;            /* Pass index */
   int pc = 0;       /* Pass count */
   int count = 1;    /* Deleted pixel count */
   int p, q;         /* Neighborhood maps of adjacent cells */
   int m;            /* Deletion direction mask */

   assert(dest->size > 0 && dest->type == M_UBYTE);

   data = (uint8_t *)dest->data;
   xsize = dest->width;
   ysize = dest->height;

   /* NOTE(review): malloc results are not checked for NULL — consistent
      with the rest of this file, but worth confirming for large images. */
   qb = (uint8_t *)malloc(xsize * sizeof(char));
   qb[xsize-1] = 0; /* Used for lower-right pixel */

   /* alloc scanline pointers */
   ip = (uint8_t **)malloc(sizeof(void *) * ysize);

   /* set scanline pointers */
   for (y=0; y<ysize; y++) {
      ip[y] = data + y*xsize;
   }

   while (count) { /* Scan image while deletions */
      pc++;
      count = 0;
      for (i=0; i<4; i++) {
         m = m__masks[i];
         /* Build initial previous scan buffer: qb[x] accumulates the
            partial neighborhood map from row 0 (top edge of the image) */
         p = ip[0][0] != 0;
         for (x=0; x<xsize-1; x++) {
            p = ((p<<1)&0006) | (ip[0][x+1] != 0);
            qb[x] = (uint8_t)p;
         }
         /* Scan image for pixel deletion candidates */
         for (y=0; y<ysize-1; y++) {
            q = qb[0];
            p = ((q<<3)&0110) | (ip[y+1][0] != 0);
            for (x=0; x<xsize-1; x++) {
               /* Shift the running map one column: combine the previous
                  map (p), the saved map of the row above (q), and the
                  incoming pixel from the row below. */
               q = qb[x];
               p = ((p<<1)&0666) | ((q<<3)&0110) | (ip[y+1][x+1] != 0);
               qb[x] = (uint8_t)p;
               if (((p&m) == 0) && m__delete_map[p]) {
                  if (ip[y][x] != 0) {
                     count++;
                     ip[y][x] = 0;
                  }
               }
            }
            /* Process right edge pixel */
            p = (p<<1)&0666;
            if ((p&m) == 0 && m__delete_map[p]) {
               if (ip[y][xsize-1] != 0) {
                  count++;
                  ip[y][xsize-1] = 0;
               }
            }
         }
         /* Process bottom scan line */
         for (x=0; x<xsize; x++) {
            q = qb[x];
            p = ((p<<1)&0666) | ((q<<3)&0110);
            if ((p&m) == 0 && m__delete_map[p]) {
               if (ip[ysize-1][x] != 0) {
                  count++;
                  ip[ysize-1][x] = 0;
               }
            }
         }
      }
   }

   free(qb);
   free(ip);
}
/* Non-maximum suppression on a single-channel float image: `dest`
   becomes a copy of `src` where every pixel that is below `threshold`,
   or strictly smaller than any pixel inside its (2*radius+1)^2 window,
   is zeroed. Local maxima keep their original value. */
MIAPI void m_image_non_max_supp(struct m_image *dest, const struct m_image *src, int radius, float threshold)
{
   float *src_data, *dest_data;
   float *src_pixel, *dest_pixel;
   int width = src->width;
   int height = src->height;
   int x, y;

   assert(src->size > 0 && src->type == M_FLOAT && src->comp == 1);

   m_image_copy(dest, src);
   src_data = (float *)src->data;
   dest_data = (float *)dest->data;
   src_pixel = src_data;
   dest_pixel = dest_data;

   for (y = 0; y < height; y++)
   for (x = 0; x < width; x++) {

      int suppress = (*src_pixel < threshold);

      if (! suppress) {
         /* clamp the search window to the image bounds */
         int x0 = M_MAX(0, x - radius);
         int y0 = M_MAX(0, y - radius);
         int x1 = M_MIN(width - 1, x + radius);
         int y1 = M_MIN(height - 1, y + radius);
         int xx, yy;

         /* suppress as soon as any strictly greater neighbor is found */
         for (yy = y0; yy <= y1 && ! suppress; yy++)
         for (xx = x0; xx <= x1; xx++) {
            if (src_data[yy * width + xx] > *src_pixel) {
               suppress = 1;
               break;
            }
         }
      }

      if (suppress)
         *dest_pixel = 0;

      src_pixel++;
      dest_pixel++;
   }
}
/* Detect Harris corners in `src`: computes the Harris response, applies
   non-maximum suppression, then collects up to `max_count` corner
   positions as (x, y) pairs in `corners`, skipping a border of `margin`
   pixels. Returns the number of corners written (0 if the image is too
   small for the requested margin). */
MIAPI int m_image_corner_harris(const struct m_image *src, int margin, float radius, float threshold, int *corners, int max_count)
{
   struct m_image harris = M_IMAGE_IDENTITY();
   struct m_image nms = M_IMAGE_IDENTITY();
   float *pixel;
   int width = src->width;
   int height = src->height;
   int wm = width - margin;
   int hm = height - margin;
   int x, y;
   int count = 0;

   if (width <= (margin * 2) || height <= (margin * 2))
      return 0;

   /* Harris response followed by non-maximum suppression */
   m_image_harris(&harris, src, radius);
   m_image_non_max_supp(&nms, &harris, (int)(radius) + 1, threshold);

   pixel = (float *)nms.data;
   for (y = 0; y < height && count < max_count; y++)
   for (x = 0; x < width; x++) {
      if (count == max_count)
         break;
      if ((*pixel) > 0 && x >= margin && y >= margin && x < wm && y < hm) {
         corners[count*2] = x;
         corners[count*2+1] = y;
         count++;
      }
      pixel++;
   }

   m_image_destroy(&nms);
   m_image_destroy(&harris);
   return count;
}
/* Bilinear sample of a float image at fractional coordinates (x, y).
   Coordinates are clamped to the image bounds; one interpolated value
   per component is written to `result`. */
MIAPI void m_image_sub_pixel(const struct m_image *src, float x, float y, float *result)
{
   float *src_data = (float *)src->data;
   int width = src->width;
   int height = src->height;
   int comp = src->comp;
   int xmax = width - 1;
   int ymax = height - 1;
   int x0, y0, x1, y1, c;
   float u, v;
   float *p00, *p10, *p01, *p11;

   x0 = (int)x;
   y0 = (int)y;

   /* fractional weights, clamped so negative coordinates do not
      extrapolate (computed before the integer clamp, as before) */
   u = M_MAX(x - (float)x0, 0);
   v = M_MAX(y - (float)y0, 0);

   x0 = M_CLAMP(x0, 0, xmax);
   y0 = M_CLAMP(y0, 0, ymax);
   x1 = M_MIN(x0 + 1, xmax);
   y1 = M_MIN(y0 + 1, ymax);

   /* four neighboring texels: p<column><row> */
   p00 = src_data + (width * y0 + x0) * comp;
   p10 = src_data + (width * y0 + x1) * comp;
   p01 = src_data + (width * y1 + x0) * comp;
   p11 = src_data + (width * y1 + x1) * comp;

   for (c = 0; c < comp; c++) {
      /* blend vertically first, then horizontally */
      float left  = p00[c] + (p01[c] - p00[c]) * v;
      float right = p10[c] + (p11[c] - p10[c]) * v;
      result[c] = left + (right - left) * u;
   }
}
/* slow TODO better */
/* Resample `src` into the already-allocated float image `dest` by doing
   one bilinear lookup per destination pixel; source coordinates are the
   destination pixel centers scaled by (dx, dy) and shifted by `offset`.
   Rows are processed in parallel. */
static void m__bilinear(struct m_image *dest, const struct m_image *src, float dx, float dy, float offset)
{
   float *dest_data = (float *)dest->data;
   int width = dest->width;
   int height = dest->height;
   int comp = src->comp;
   int row, row_stride = width * comp;
   #pragma omp parallel for schedule(dynamic, 8)
   for (row = 0; row < height; row++) {
      float *out = dest_data + row * row_stride;
      float sy = ((float)row + 0.5f) * dy + offset; /* constant per row */
      int col;
      for (col = 0; col < width; col++) {
         m_image_sub_pixel(src, ((float)col + 0.5f) * dx + offset, sy, out);
         out += comp;
      }
   }
}
MIAPI void m_image_pyrdown(struct m_image *dest, const struct m_image *src)
{
struct m_image tmp = M_IMAGE_IDENTITY();
float *src_data;
float *dest_pixel;
int width = src->width;
int height = src->height;
int comp = src->comp;
int comp2 = comp * 2;
int ystep = width * comp * 2;
int w2 = width / 2;
int h2 = height / 2;
int x, y, i;
m_image_gaussian_blur(&tmp, src, 1.5f, 1.5f);
m_image_create(dest, M_FLOAT, w2, h2, comp);
src_data = (float *)tmp.data;
dest_pixel = (float *)dest->data;
for (y = 0; y < h2; y++) {
float *src_pixel = src_data + y * ystep;
for (x = 0; x < w2; x++) {
for (i = 0; i < comp; i++)
dest_pixel[i] = src_pixel[i];
dest_pixel += comp;
src_pixel += comp2;
}
}
m_image_destroy(&tmp);
}
/* Resize a float image to (new_width, new_height) using bilinear
   sampling. When shrinking, the source is pre-blurred proportionally to
   the scale factor to reduce aliasing. Handles dest == src. */
MIAPI void m_image_resize(struct m_image *dest, const struct m_image *src, int new_width, int new_height)
{
   struct m_image tmp = M_IMAGE_IDENTITY();
   int comp = src->comp;
   float sx = (float)src->width / (float)new_width;
   float sy = (float)src->height / (float)new_height;

   assert(src->size > 0 && src->type == M_FLOAT);

   if (sx > 1.0f || sy > 1.0f) {
      /* downscaling: blur into tmp first; the bilinear pass then reads
         from tmp, so recreating dest is safe even when dest == src */
      m_image_gaussian_blur(&tmp, src, M_MAX(0.0f, sx - 1.0f), M_MAX(0.0f, sy - 1.0f));
      m_image_create(dest, M_FLOAT, new_width, new_height, comp);
      m__bilinear(dest, &tmp, sx, sy, -0.5f);
   }
   else {
      /* upscaling (or same size): keep a copy when called in place */
      if (dest == src) {
         m_image_copy(&tmp, src);
         src = &tmp;
      }
      m_image_create(dest, M_FLOAT, new_width, new_height, comp);
      m__bilinear(dest, src, sx, sy, -0.5f);
   }

   m_image_destroy(&tmp);
}
#endif /* M_IMAGE_IMPLEMENTATION */
|
omp_getEnvInfo.c | /******************************************************************************
* FILE: omp_getEnvInfo.c
* DESCRIPTION:
* OpenMP Example - Get Environment Information - C/C++ Version
* The master thread queries and prints selected environment information.
* AUTHOR: Blaise Barney 7/06
* LAST REVISED: 07/12/06
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Entry point: opens a parallel region in which only the master thread
 * (tid == 0) queries and prints selected OpenMP runtime settings.
 * Falls off the end of main, which is an implicit `return 0` in C99. */
int main (int argc, char *argv[])
{
  /* nthreads/tid are private per thread; the remaining variables are
     only written by the master thread inside the region */
  int nthreads, tid, procs, maxt, inpar, dynamic, nested;

  /* Start parallel region */
  #pragma omp parallel private(nthreads, tid)
  {
    /* Obtain thread number */
    tid = omp_get_thread_num();

    /* Only master thread does this */
    if (tid == 0)
    {
      printf("Thread %d getting environment info...\n", tid);

      /* Get environment information */
      procs = omp_get_num_procs();
      nthreads = omp_get_num_threads();
      maxt = omp_get_max_threads();
      inpar = omp_in_parallel();
      dynamic = omp_get_dynamic();
      nested = omp_get_nested();

      /* Print environment information */
      printf("Number of processors = %d\n", procs);
      printf("Number of threads = %d\n", nthreads);
      printf("Max threads = %d\n", maxt);
      printf("In parallel? = %d\n", inpar);
      printf("Dynamic threads enabled? = %d\n", dynamic);
      printf("Nested parallelism supported? = %d\n", nested);
    }
  } /* Done */
}
|
GB_unop__minv_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int16_int16)
// op(A') function: GB (_unop_tran__minv_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 16) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [0:anz-1] = minv (Ax [0:anz-1]): apply the MINV unary operator to an
// int16 array via GB_IMINV_SIGNED. Cx and Ax may be aliased. When Ab is
// non-NULL the matrix is bitmap and entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__minv_int16_int16)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 16) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose an int16 matrix, typecast, and apply the MINV
// unary operator. The loop body is shared template code textually included
// from GB_unop_transpose.c, driven by the GB_* macros defined earlier in
// this generated file. Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_unop_tran__minv_int16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
resource_manager.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef RESOURCE_MANAGER_H_
#define RESOURCE_MANAGER_H_
#include <Rtypes.h>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#ifdef USE_OPENCL
#define __CL_ENABLE_EXCEPTIONS
#ifdef __APPLE__
#include <OpenCL/cl.hpp>
#else
#include <CL/cl.hpp>
#endif
#endif
#include "backend.h"
#include "diffusion_grid.h"
#include "tuple_util.h"
#include "variadic_template_parameter_util.h"
namespace bdm {
/// Unique identifier of a simulation object. Acts as a type erased pointer.
/// Has the same type for every simulation object.
/// The id is split into two parts: Type index and element index.
/// The first one is used to obtain the container in the ResourceManager, the
/// second specifies the element within this vector.
class SoHandle {
 public:
  /// Creates an invalid ("null") handle: both indices are set to their
  /// maximum value. \see kNullSoHandle
  constexpr SoHandle() noexcept
      : type_idx_(std::numeric_limits<decltype(type_idx_)>::max()),
        element_idx_(std::numeric_limits<decltype(element_idx_)>::max()) {}

  /// @param type_idx    index of the type's container inside the
  ///                    ResourceManager tuple
  /// @param element_idx index of the element within that container
  SoHandle(uint16_t type_idx, uint32_t element_idx)
      : type_idx_(type_idx), element_idx_(element_idx) {}

  uint16_t GetTypeIdx() const { return type_idx_; }
  uint32_t GetElementIdx() const { return element_idx_; }
  void SetElementIdx(uint32_t element_idx) { element_idx_ = element_idx; }

  bool operator==(const SoHandle& other) const {
    return type_idx_ == other.type_idx_ && element_idx_ == other.element_idx_;
  }

  bool operator!=(const SoHandle& other) const { return !(*this == other); }

  /// Orders primarily by type index, then by element index; enables use
  /// as a key in ordered containers.
  bool operator<(const SoHandle& other) const {
    if (type_idx_ == other.type_idx_) {
      return element_idx_ < other.element_idx_;
    } else {
      return type_idx_ < other.type_idx_;
    }
  }

  friend std::ostream& operator<<(std::ostream& stream,
                                  const SoHandle& handle) {
    stream << "Type idx: " << handle.type_idx_
           << " element idx: " << handle.element_idx_;
    return stream;
  }

 private:
  // TODO(lukas) add using TypeIdx_t = uint16_t and
  // using ElementIdx_t = uint32_t
  uint16_t type_idx_;
  /// changed element index to uint32_t after issues with std::atomic with
  /// size 16 -> max element_idx: 4.294.967.296
  uint32_t element_idx_;

  // ROOT I/O support
  ClassDefNV(SoHandle, 1);
};
constexpr SoHandle kNullSoHandle;
namespace detail {

/// \see bdm::ConvertToContainerTuple, VariadicTypedef
template <typename Backend, typename... Types>
struct ConvertToContainerTuple {};

/// \see bdm::ConvertToContainerTuple, VariadicTypedef
/// Specialization that unpacks the types stored inside a VariadicTypedef.
template <typename Backend, typename... Types>
struct ConvertToContainerTuple<Backend, VariadicTypedef<Types...>> {
  // Helper alias to get the container type associated with Backend
  template <typename T>
  using Container = typename Backend::template Container<T>;
  // Helper type alias to get a type with certain Backend
  template <typename T>
  using ToBackend = typename T::template Self<Backend>;
  // resulting tuple: one backend-specific container per atomic type
  using type = std::tuple<Container<ToBackend<Types>>...>;  // NOLINT
};

/// Type trait to obtain the index of a type within a tuple.
/// Required to extract variadic types from within a `VariadicTypedef`
template <typename TSo, typename... Types>
struct ToIndex;

template <typename TSo, typename... Types>
struct ToIndex<TSo, VariadicTypedef<Types...>> {
  static constexpr uint16_t value = GetIndex<TSo, Types...>();  // NOLINT
};

}  // namespace detail

/// Create a tuple of types in the parameter pack and wrap each type with
/// container.
/// @tparam Backend in which the variadic types should be stored in
/// @tparam TVariadicTypedefWrapper type that wraps a VariadicTypedef
///         which in turn contains the variadic template parameters
/// \see VariadicTypedefWrapper
template <typename Backend, typename TVariadicTypedef>
struct ConvertToContainerTuple {
  typedef
      typename detail::ConvertToContainerTuple<Backend, TVariadicTypedef>::type
          type;  // NOLINT
};
/// Forward declaration for concrete compile time parameter.
/// Will be used as default template parameter.
template <typename TBackend = Soa>
struct CompileTimeParam;
/// ResourceManager holds a container for each atomic type in the simulation.
/// It provides methods to get a certain container, execute a function on a
/// a certain element, all elements of a certain type or all elements inside
/// the ResourceManager. Elements are uniquely identified with its SoHandle.
/// Furthermore, the types specified in AtomicTypes are backend invariant
/// Hence it doesn't matter which version of the Backend is specified.
/// ResourceManager internally uses the TBackendWrapper parameter to convert
/// all atomic types to the desired backend.
/// This makes user code easier since atomic types can be specified as scalars.
/// @tparam TCompileTimeParam type that containes the compile time parameter for
/// a specific simulation. ResourceManager extracts Backend and AtomicTypes.
template <typename TCompileTimeParam = CompileTimeParam<>>
class ResourceManager {
 public:
  using Backend = typename TCompileTimeParam::SimulationBackend;
  using Types = typename TCompileTimeParam::AtomicTypes;
  /// Determine Container based on the Backend
  template <typename T>
  using TypeContainer = typename Backend::template Container<T>;
  /// Helper type alias to get a type with certain Backend
  template <typename T>
  using ToBackend = typename T::template Self<Backend>;

  /// Singleton pattern - return the only instance with this template
  /// parameters
  static ResourceManager<TCompileTimeParam>* Get() { return instance_.get(); }

  /// Return the container of this Type
  /// @tparam Type atomic type whose container should be returned
  ///         invariant to the Backend. This means that even if
  ///         ResourceManager stores e.g. `SoaCell`, Type can be `Cell`
  ///         and still returns the correct container.
  template <typename Type>
  TypeContainer<ToBackend<Type>>* Get() {
    return &std::get<TypeContainer<ToBackend<Type>>>(data_);
  }

  /// Return the container of diffusion grids
  std::vector<DiffusionGrid*>& GetDiffusionGrids() { return diffusion_grids_; }

  /// Return the diffusion grid which holds the substance of specified id
  DiffusionGrid* GetDiffusionGrid(size_t substance_id) {
    assert(substance_id < diffusion_grids_.size() &&
           "You tried to access a diffusion grid that does not exist!");
    return diffusion_grids_[substance_id];
  }

  /// Return the diffusion grid which holds the substance of specified name
  /// Returns nullptr if no grid with that substance name is registered.
  DiffusionGrid* GetDiffusionGrid(std::string substance_name) {
    for (auto dg : diffusion_grids_) {
      if (dg->GetSubstanceName() == substance_name) {
        return dg;
      }
    }
    return nullptr;
  }

  /// Returns the total number of simulation objects across all containers
  size_t GetNumSimObjects() {
    size_t num_so = 0;
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i,
                   [&](auto* container) { num_so += container->size(); });
    }
    return num_so;
  }

  /// Default constructor. Unfortunately needs to be public although it is
  /// a singleton to be able to use ROOT I/O
  ResourceManager() {
    // Soa container contain one element upon construction
    Clear();
  }

  /// Free the memory that was reserved for the diffusion grids
  virtual ~ResourceManager() {
    for (auto grid : diffusion_grids_) {
      delete grid;
    }
  }

  /// Apply a function on a certain element
  /// @param handle - simulation object id; specifies the tuple index and
  /// element index \see SoHandle
  /// @param function that will be called with the element as a parameter
  ///
  ///     rm->ApplyOnElement(handle, [](auto& element) {
  ///                          std::cout << element << std::endl;
  ///                       });
  template <typename TFunction>
  auto ApplyOnElement(SoHandle handle, TFunction&& function) {
    auto type_idx = handle.GetTypeIdx();
    auto element_idx = handle.GetElementIdx();
    return ::bdm::Apply(&data_, type_idx, [&](auto* container) -> decltype(
                                              function((*container)[0])) {
      return function((*container)[element_idx]);
    });
  }

  /// Apply a function on all container types
  /// @param function that will be called with each container as a parameter
  ///
  ///     rm->ApplyOnAllTypes([](auto* container, uint16_t type_idx) {
  ///                           std::cout << container->size() << std::endl;
  ///                         });
  template <typename TFunction>
  void ApplyOnAllTypes(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) { function(container, i); });
    }
  }

  /// Apply a function on all container types. Function invocations are
  /// parallelized
  /// @param function that will be called with each container as a parameter
  ///
  ///     rm->ApplyOnAllTypes([](auto* container, uint16_t type_idx) {
  ///                           std::cout << container->size() << std::endl;
  ///                         });
  ///
  /// NOTE(review): the body is currently identical to ApplyOnAllTypes —
  /// invocations are NOT actually parallelized; confirm whether this is a
  /// placeholder before relying on the name.
  template <typename TFunction>
  void ApplyOnAllTypesParallel(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) { function(container, i); });
    }
  }

  /// Apply a function on all elements in every container
  /// @param function that will be called with each container as a parameter
  ///
  ///     rm->ApplyOnAllElements([](auto& element, SoHandle handle) {
  ///                              std::cout << element << std::endl;
  ///                            });
  template <typename TFunction>
  void ApplyOnAllElements(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) {
        for (size_t e = 0; e < container->size(); e++) {
          function((*container)[e], SoHandle(i, e));
        }
      });
    }
  }

  /// Apply a function on all elements in every container
  /// Function invocations are parallelized (OpenMP over the elements of
  /// each container in turn)
  /// \see ApplyOnAllElements
  template <typename TFunction>
  void ApplyOnAllElementsParallel(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) {
#pragma omp parallel for
        for (size_t e = 0; e < container->size(); e++) {
          function((*container)[e], SoHandle(i, e));
        }
      });
    }
  }

  /// Remove elements from each type
  void Clear() {
    ApplyOnAllTypes(
        [](auto* container, uint16_t type_idx) { container->clear(); });
  }

  /// Append a simulation object to the container of its type.
  template <typename TSo>
  void push_back(const TSo& so) {  // NOLINT
    Get<TSo>()->push_back(so);
  }

#ifdef USE_OPENCL
  cl::Context* GetOpenCLContext() { return &opencl_context_; }
  cl::CommandQueue* GetOpenCLCommandQueue() { return &opencl_command_queue_; }
  std::vector<cl::Device>* GetOpenCLDeviceList() { return &opencl_devices_; }
  std::vector<cl::Program>* GetOpenCLProgramList() { return &opencl_programs_; }
#endif

  /// Create a new simulation object and return a reference to it.
  /// @tparam TScalarSo simulation object type with scalar backend
  /// @param args arguments which will be forwarded to the TScalarSo
  ///        constructor
  /// @remarks Note that this function is not thread safe.
  template <typename TScalarSo, typename... Args, typename TBackend = Backend>
  typename std::enable_if<std::is_same<TBackend, Soa>::value,
                          typename TScalarSo::template Self<SoaRef>>::type
  New(Args... args) {
    auto container = Get<TScalarSo>();
    auto idx =
        container->DelayedPushBack(TScalarSo(std::forward<Args>(args)...));
    return (*container)[idx];
  }

  /// Scalar-backend overload of New. \see New above
  template <typename TScalarSo, typename... Args, typename TBackend = Backend>
  typename std::enable_if<std::is_same<TBackend, Scalar>::value,
                          TScalarSo&>::type
  New(Args... args) {
    auto container = Get<TScalarSo>();
    auto idx =
        container->DelayedPushBack(TScalarSo(std::forward<Args>(args)...));
    return (*container)[idx];
  }

  /// Returns the number of simulation object types
  static constexpr size_t NumberOfTypes() {
    return std::tuple_size<decltype(data_)>::value;
  }

  /// Returns the tuple index of the given simulation object type.
  template <typename TSo>
  static constexpr uint16_t GetTypeIndex() {
    return detail::ToIndex<TSo, Types>::value;
  }

 private:
  static std::unique_ptr<ResourceManager<TCompileTimeParam>> instance_;
  /// creates one container for each type in Types.
  /// Container type is determined based on the specified Backend
  typename ConvertToContainerTuple<Backend, Types>::type data_;
  std::vector<DiffusionGrid*> diffusion_grids_;
#ifdef USE_OPENCL
  cl::Context opencl_context_;             //!
  cl::CommandQueue opencl_command_queue_;  //!
  // Currently only support for one GPU device
  std::vector<cl::Device> opencl_devices_;    //!
  std::vector<cl::Program> opencl_programs_;  //!
#endif

  friend class SimulationBackup;
  ClassDefNV(ResourceManager, 1);
};
/// Out-of-line definition of the per-template-parameter singleton instance.
template <typename T>
std::unique_ptr<ResourceManager<T>> ResourceManager<T>::instance_ =
    std::unique_ptr<ResourceManager<T>>(new ResourceManager<T>());

/// Returns the ResourceManager singleton for the given compile time
/// parameter (defaults to ResourceManager<>).
template <typename TResourceManager = ResourceManager<>>
TResourceManager* Rm() {
  return TResourceManager::Get();
}
} // namespace bdm
#endif // RESOURCE_MANAGER_H_
|
main.c | #include <stdint.h>
#include "omp.h"
#include "common.h"
//NOTE Make your configuration here
#define NUM_THREADS 8U
#define ITERS 3U
#define CHECK
// #define VERBOSE
#include "data/input.h"
/* Validate the BFS output: sums all node costs and compares the result
 * against the precompiled reference value (CHECKSUM, from data/input.h).
 *
 * output  - per-node cost array produced by the traversal
 * nbNodes - number of nodes in the graph
 *
 * Returns 0 when the checksum matches, 1 on mismatch. Note that when no
 * reference is available (CHECKSUM == 0) the initial failure value is
 * returned unchanged.
 */
static inline int32_t
checkResults(int32_t *output, uint32_t nbNodes)
{
    uint32_t checksum = 0;
    int32_t ret = 1; /* 1 == fail */
    uint32_t i;

#ifdef VERBOSE
    printf("[bfs] Checksumming...\n");
#endif

    for(i = 0 ; i < nbNodes ; i++)
    {
#ifdef OUTPUT
        printf("%x \n", output[i]);
#endif
        checksum += output[i];
    }

#ifdef VERBOSE
    printf("[bfs] Checksum 0x%x\n", (int)checksum);
#endif

    if(CHECKSUM)
    {
        if(CHECKSUM == checksum)
        {
#ifdef VERBOSE
            printf("[bfs] Check...[" ANSI_COLOR_GREEN "SUCCESS" ANSI_COLOR_RESET "]\n");
#endif
            ret = 0;
        }
        else
        {
            printf("[bfs] Check...[" ANSI_COLOR_RED "FAIL" ANSI_COLOR_RESET "]\n");
            printf("[bfs] Checksum 0x%x\n", (int) checksum);
            ret = 1;
        }
    }
    return ret;
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
/*
 * BFS benchmark driver (level-synchronous, OpenMP). Runs ITERS
 * traversals of the graph compiled in from data/input.h, profiles each
 * run and, with CHECK defined, validates the resulting cost array.
 * Returns 0 on success, non-zero on checksum failure.
 */
int main() {
    uint32_t iter;
    int32_t ret = -1; /* set by checkResults: 0 == pass, 1 == fail */

    /* allocate host memory */
    uint8_t *h_graph_mask = (uint8_t*) l1malloc(sizeof(uint8_t)*no_of_nodes);
    uint8_t *h_updating_graph_mask = (uint8_t*) l1malloc(sizeof(uint8_t)*no_of_nodes);
    uint8_t *h_graph_visited = (uint8_t*) l1malloc(sizeof(uint8_t)*no_of_nodes);

    /* allocate mem for the result on host side */
    int32_t* h_cost = (int32_t*)l1malloc( sizeof(int32_t)*no_of_nodes);
    int i;

    for(iter = 0; iter < ITERS; ++iter)
    {
        /* initalize the memory: no node in the frontier or visited yet */
        for(i = 0; i < no_of_nodes; i++)
        {
            h_graph_mask[i]=false;
            h_updating_graph_mask[i]=false;
            h_graph_visited[i]=false;
        }

        /* set the source node as true in the mask */
        h_graph_mask[source]=true;
        h_graph_visited[source]=true;

        /* cost of -1 marks "not reached yet" */
        for(i=0;i<no_of_nodes;i++)
            h_cost[i]=-1;
        h_cost[source]=0;

#ifdef VERBOSE
        printf("[bfs] start traversing the tree size=%d...\n", DIM);
#endif
        profile_start(iter);
        {
            uint8_t stop;
            do
            {
                int tid;

                /* if no thread changes this value then the loop stops */
                stop=false;

                /* Expand the current frontier in parallel.
                 * NOTE(review): concurrent threads may store to
                 * h_cost[id] and h_updating_graph_mask[id] for the same
                 * id. This race is present in the original kernel;
                 * presumably benign because all writers belong to the
                 * same BFS level — confirm before relying on exact
                 * per-node cost values. */
                #pragma omp parallel for private(tid) private(i)
                for(tid = 0; tid < no_of_nodes; tid++ )
                {
                    if (h_graph_mask[tid] == true){
                        h_graph_mask[tid]=false;
                        for(i=h_graph_nodes[tid].starting; i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++)
                        {
                            int id = h_graph_edges[i];
                            if(!h_graph_visited[id])
                            {
                                h_cost[id]=h_cost[tid]+1;
                                h_updating_graph_mask[id]=true;
                            }
                        }
                    }
                }

                /* Promote the pending nodes to the next frontier (serial) */
                for(tid=0; tid< no_of_nodes ; tid++ )
                {
                    if (h_updating_graph_mask[tid] == true){
                        h_graph_mask[tid]=true;
                        h_graph_visited[tid]=true;
                        stop=true;
                        h_updating_graph_mask[tid]=false;
                    }
                }
            }
            while(stop);
        }
#ifdef VERBOSE
        printf("[bfs] start traversing done!\n");
#endif
        profile_stop(iter);

#ifdef CHECK
        ret = checkResults(h_cost, no_of_nodes);
        if(ret) break; /* stop iterating on the first failure */
#endif
    }
    profile_show();

    /* cleanup memory */
    l1free( h_graph_mask);
    l1free( h_updating_graph_mask);
    l1free( h_graph_visited);
    l1free( h_cost);
    return ret;
}
|
parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd foo
/* Clause-less '#pragma omp parallel for simd' must attach to a canonical
   for loop; a non-loop statement after it is diagnosed. (All expected-*
   comments below are clang -verify directives; their relative positions
   are load-bearing.) */
void test_no_clause() {
  int i;
#pragma omp parallel for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}}
#pragma omp parallel for simd
  ++i;
}
/* Branches may not enter or leave the protected OpenMP region: goto to an
   outside label and return are errors, while jumps confined to the region
   (L2) are allowed. */
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
/* Unknown tokens after the directive name are ignored with a warning,
   not an error. */
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
/* Trailing punctuation (semicolons, commas) after the directive or after a
   valid clause is ignored with a warning. */
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd;
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
#pragma omp parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
/* Exercises the safelen() clause parser and semantic checks: malformed
   parentheses/argument lists, non-constant and non-positive arguments. */
void test_safelen() {
  int i;

// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd safelen()
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
void test_simdlen() {
/* Mirror of test_safelen for the 'simdlen' clause: same parse-recovery
 * cases and the same strictly-positive integer-constant requirement. */
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_safelen_simdlen() {
/* Cross-clause check: simdlen must be <= safelen, in either clause order. */
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
void test_collapse() {
/* Exercise 'collapse' clause parsing plus its semantic consequences:
 * collapse(N) requires N perfectly nested for-loops, the argument must be
 * a strictly positive integer constant, and no OpenMP construct may be
 * nested inside the resulting simd region. */
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
#pragma omp parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{integer constant expression}}
#pragma omp parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd collapse(2)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp parallel for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_linear() {
/* Exercise 'linear' clause parsing (list syntax, optional ': step') and
 * data-sharing conflicts: a variable may not be both linear and
 * linear/private/lastprivate; a constant zero step is only a warning. */
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd linear(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd linear(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd linear(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp parallel for simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp parallel for simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp parallel for simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp parallel for simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp parallel for simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp parallel for simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
void test_aligned() {
/* Exercise 'aligned' clause parsing and semantics: the list items must be
 * pointers or arrays, and a variable may appear in at most one aligned
 * clause across the directive. */
int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
/* Exercise 'private' clause parse recovery and the valid one-, two- and
 * three-variable list forms. */
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
/* Exercise 'lastprivate' clause parse recovery and valid list forms. */
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
/* Exercise 'firstprivate' clause parse recovery, plus the valid
 * combination of lastprivate and firstprivate on the same variables. */
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
/* The canonical loop form requires an integer or pointer induction
 * variable; float and double loop variables must be rejected. */
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
void test_nontemporal() {
/* Version-gated clauses: 'nontemporal' and 'order' are OpenMP 5.0
 * features, so the omp45 run line rejects them outright while omp50
 * checks their own parse and semantic rules. */
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp parallel for simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp parallel for simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp parallel for simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp parallel for simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel for simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel for simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel for simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp parallel for simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp parallel for simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp parallel for simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}}
#pragma omp parallel for simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected '(' after 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
for (int i = 0; i < 10; ++i)
;
#pragma omp parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}}
for (int i = 0; i < 10; ++i)
;
}
|
/* test_verify_tables.c */
#include "config.h"
#include <limits.h>
#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "kseq.h"
KSEQ_INIT(int, read)
#include "parasail.h"
#include "parasail/cpuid.h"
#include "parasail/memory.h"
#include "parasail/matrix_lookup.h"
#include "func_verify_tables.h"
/* Global verbosity flag, set by the '-v' command-line option. */
static int verbose = 0;
/* An affine gap penalty: cost 'open' to start a gap, 'extend' per residue. */
typedef struct gap_score {
int open;
int extend;
} gap_score_t;
/* Default gap penalty combinations swept by check_functions; the
 * {INT_MIN,INT_MIN} entry is the end-of-list sentinel. */
gap_score_t gap_scores[] = {
{10,1},
{10,2},
{14,2},
{40,2},
{INT_MIN,INT_MIN}
};
/* Read every sequence from a FASTA/FASTQ file into freshly-allocated
 * parallel arrays.
 *
 * filename  path of the input file; the process exits(1) on any failure.
 * strings_  out: array of strdup'd sequence strings (count_ entries).
 * sizes_    out: array of sequence lengths, parallel to strings_.
 * count_    out: number of sequences read.
 *
 * Ownership of *strings_ (each element and the array) and *sizes_ passes
 * to the caller. The arrays grow by doubling from an initial capacity of
 * 1000. Fix over the original: the two initial malloc results were used
 * unchecked, while the realloc results were checked — now all allocations
 * are verified before use. */
static inline void parse_sequences(
    const char *filename,
    char ***strings_,
    unsigned long **sizes_,
    unsigned long *count_)
{
    FILE* fp;
    kseq_t *seq = NULL;
    int l = 0;
    char **strings = NULL;
    unsigned long *sizes = NULL;
    unsigned long count = 0;
    unsigned long memory = 1000;

    fp = fopen(filename, "r");
    if(fp == NULL) {
        perror("fopen");
        exit(1);
    }
    strings = malloc(sizeof(char*) * memory);
    if (NULL == strings) {
        perror("malloc");
        exit(1);
    }
    sizes = malloc(sizeof(unsigned long) * memory);
    if (NULL == sizes) {
        perror("malloc");
        exit(1);
    }
    seq = kseq_init(fileno(fp));
    /* kseq_read returns the sequence length, or a negative value at EOF
       or on stream error */
    while ((l = kseq_read(seq)) >= 0) {
        strings[count] = strdup(seq->seq.s);
        if (NULL == strings[count]) {
            perror("strdup");
            exit(1);
        }
        sizes[count] = seq->seq.l;
        ++count;
        if (count >= memory) {
            /* double capacity; keep old buffers alive until realloc succeeds */
            char **new_strings = NULL;
            unsigned long *new_sizes = NULL;
            memory *= 2;
            new_strings = realloc(strings, sizeof(char*) * memory);
            if (NULL == new_strings) {
                perror("realloc");
                exit(1);
            }
            strings = new_strings;
            new_sizes = realloc(sizes, sizeof(unsigned long) * memory);
            if (NULL == new_sizes) {
                perror("realloc");
                exit(1);
            }
            sizes = new_sizes;
        }
    }
    kseq_destroy(seq);
    fclose(fp);
    *strings_ = strings;
    *sizes_ = sizes;
    *count_ = count;
}
/* Compute the binomial coefficient "n choose k" with the multiplicative
 * method (see http://blog.plover.com/math/choose.html): after multiplying
 * in the d-th of k consecutive factors, the running product is divisible
 * by d!, so the division is always exact. Returns 0 when k > n. */
static inline unsigned long binomial_coefficient(
    unsigned long n,
    unsigned long k)
{
    unsigned long result = 1;
    unsigned long step;
    if (k > n) {
        return 0;
    }
    for (step = 1; step <= k; ++step) {
        result *= n;
        --n;
        result /= step;
    }
    return result;
}
static inline void k_combination2(
unsigned long pos,
unsigned long *a,
unsigned long *b)
{
double s;
double i = floor(sqrt(2.0 * pos)) - 1.0;
if (i <= 1.0) {
i = 1.0;
}
s = i * (i - 1.0) / 2.0;
while (pos - s >= i) {
s += i;
i += 1;
}
*a = (unsigned long)(pos - s);
*b = (unsigned long)(i);
}
/* Compare two s1Len*s2Len result tables element-wise.
 * Returns 1 if they differ anywhere, 0 if identical. */
static inline int diff_array(
    unsigned long s1Len,
    unsigned long s2Len,
    int *a,
    int *b)
{
    /* equal int values have equal object representations on supported
       platforms, so a single byte-wise comparison is equivalent to the
       element-by-element loop */
    return 0 != memcmp(a, b, s1Len * s2Len * sizeof(int));
}
/* Compare every implementation in function group 'f' against the group's
 * first (reference) implementation, over all sequence pairs up to
 * pair_limit, for each scoring matrix and gap-penalty combination.
 * Mismatches in score or in any of the score/matches/similar/length
 * tables are reported to stdout.
 *
 * f           function group; f.fs[0] is treated as the reference,
 *             f.fs[1..] (until a NULL pointer) are the candidates.
 * sequences   sequence strings indexed by the pair decoding.
 * sizes       lengths parallel to 'sequences'.
 * pair_limit  number of (a,b) pairs to test (flat k_combination2 index).
 * matrix_     if non-NULL, test only this matrix; otherwise sweep all
 *             matrices in parasail_matrices.
 * gap         if both fields differ from INT_MIN, use this single gap
 *             setting instead of sweeping the gap_scores table. */
static void check_functions(
parasail_function_group_t f,
char **sequences,
unsigned long *sizes,
unsigned long pair_limit,
const parasail_matrix_t *matrix_,
gap_score_t gap)
{
const parasail_function_info_t *functions = f.fs;
unsigned long matrix_index = 0;
unsigned long gap_index = 0;
unsigned long function_index = 0;
unsigned long pair_index = 0;
parasail_function_t *reference_function = NULL;
const parasail_matrix_t ** matrices = parasail_matrices;
/* NULL-terminated single-element list used when the caller pinned a matrix */
const parasail_matrix_t * single_matrix[] = {
matrix_,
NULL
};
if (NULL != matrix_) {
matrices = single_matrix;
}
printf("checking %s functions\n", f.name);
for (matrix_index=0; NULL!=matrices[matrix_index]; ++matrix_index) {
const parasail_matrix_t *matrix = matrices[matrix_index];
const char *matrixname = matrix->name;
if (verbose) printf("\t%s\n", matrixname);
/* gap_scores is terminated by an {INT_MIN,INT_MIN} sentinel */
for (gap_index=0; INT_MIN!=gap_scores[gap_index].open; ++gap_index) {
int open = gap_scores[gap_index].open;
int extend = gap_scores[gap_index].extend;
/* a fully-specified user gap overrides the table entry */
if (gap.open != INT_MIN && gap.extend != INT_MIN) {
open = gap.open;
extend = gap.extend;
}
if (verbose) printf("\t\topen=%d extend=%d\n", open, extend);
reference_function = functions[0].pointer;
for (function_index=1;
NULL!=functions[function_index].pointer;
++function_index) {
if (verbose) printf("\t\t\t%s\n", functions[function_index].name);
unsigned long saturated = 0;
/* pair_index is the loop variable, hence implicitly private;
 * 'saturated' is shared and updated atomically below */
#pragma omp parallel for
for (pair_index=0; pair_index<pair_limit; ++pair_index) {
parasail_result_t *reference_result = NULL;
parasail_result_t *result = NULL;
unsigned long a = 0;
unsigned long b = 1;
/* decode the flat index into the (a,b) sequence pair */
k_combination2(pair_index, &a, &b);
if (verbose) printf("\t\t\t\tpair=%lu (%lu,%lu)\n", pair_index, a, b);
reference_result = reference_function(
sequences[a], sizes[a],
sequences[b], sizes[b],
open, extend,
matrix);
result = functions[function_index].pointer(
sequences[a], sizes[a],
sequences[b], sizes[b],
open, extend,
matrix);
if (result->saturated) {
/* no point in comparing a result that saturated */
parasail_result_free(reference_result);
parasail_result_free(result);
#pragma omp atomic
saturated += 1;
continue;
}
if (reference_result->score != result->score) {
/* named critical section keeps multi-line reports unscrambled */
#pragma omp critical(printer)
{
printf("%s(%lu,%lu,%d,%d,%s) wrong score (%d!=%d)\n",
functions[function_index].name,
a, b, open, extend,
matrixname,
reference_result->score, result->score);
}
}
if (diff_array(
sizes[a], sizes[b],
reference_result->score_table,
result->score_table)) {
#pragma omp critical(printer)
{
printf("%s(%lu,%lu,%d,%d,%s) bad score table\n",
functions[function_index].name,
a, b, open, extend,
matrixname);
}
}
/* stats tables are only present for stats variants; the
 * reference's pointer gates each comparison */
if (reference_result->matches_table
&& diff_array(
sizes[a], sizes[b],
reference_result->matches_table,
result->matches_table)) {
#pragma omp critical(printer)
{
printf("%s(%lu,%lu,%d,%d,%s) bad matches table\n",
functions[function_index].name,
a, b, open, extend,
matrixname);
}
}
if (reference_result->similar_table
&& diff_array(
sizes[a], sizes[b],
reference_result->similar_table,
result->similar_table)) {
#pragma omp critical(printer)
{
printf("%s(%lu,%lu,%d,%d,%s) bad similar table\n",
functions[function_index].name,
a, b, open, extend,
matrixname);
}
}
if (reference_result->length_table
&& diff_array(
sizes[a], sizes[b],
reference_result->length_table,
result->length_table)) {
#pragma omp critical(printer)
{
printf("%s(%lu,%lu,%d,%d,%s) bad length table\n",
functions[function_index].name,
a, b, open, extend,
matrixname);
}
}
parasail_result_free(reference_result);
parasail_result_free(result);
}
if (verbose && saturated) {
printf("%s %d %d %s saturated %lu times\n",
functions[function_index].name,
open, extend,
matrixname,
saturated);
}
}
if (gap.open != INT_MIN && gap.extend != INT_MIN) {
/* user-specified gap, don't loop */
break;
}
}
}
}
/*
 * Driver: parse command-line options, load sequences from a FASTA file,
 * then cross-check every parasail table-producing function against its
 * reference implementation on each instruction set available at runtime.
 *
 * Options:
 *   -f FILE  sequence file (required)
 *   -m NAME  substitution matrix (default: iterate over all matrices)
 *   -n N     number of sequences to read
 *   -o N     gap open penalty (default: iterate over gap_scores[])
 *   -e N     gap extend penalty
 *   -v       verbose progress output
 *   -s       also test the stats variants
 *   -S       skip the plain score variants
 *
 * Returns 0; exits with status 1 on any usage or lookup error.
 */
int main(int argc, char **argv)
{
    unsigned long i = 0;
    unsigned long seq_count = 0;
    unsigned long limit = 0;
    char **sequences = NULL;
    unsigned long *sizes = NULL;
    char *endptr = NULL;
    char *filename = NULL;
    int c = 0;
    int test_scores = 1;
    int test_stats = 0;
    char *matrixname = NULL;
    const parasail_matrix_t *matrix = NULL;
    gap_score_t gap = {INT_MIN,INT_MIN};
    while ((c = getopt(argc, argv, "f:m:n:o:e:vsS")) != -1) {
        switch (c) {
            case 'f':
                filename = optarg;
                break;
            case 'm':
                matrixname = optarg;
                break;
            case 'n':
                errno = 0;
                seq_count = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol");
                    exit(1);
                }
                break;
            case 'o':
                errno = 0;
                gap.open = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.open");
                    exit(1);
                }
                break;
            case 'e':
                errno = 0;
                gap.extend = strtol(optarg, &endptr, 10);
                if (errno) {
                    perror("strtol gap.extend");
                    exit(1);
                }
                break;
            case 'v':
                verbose = 1;
                break;
            case 's':
                test_stats = 1;
                break;
            case 'S':
                test_scores = 0;
                break;
            case '?':
                /* FIX: every option in "f:m:n:o:e:" takes an argument;
                 * the original only reported -f and -n here. */
                if (optopt == 'f' || optopt == 'm' || optopt == 'n'
                        || optopt == 'o' || optopt == 'e') {
                    fprintf(stderr,
                            "Option -%c requires an argument.\n",
                            optopt);
                }
                else if (isprint(optopt)) {
                    fprintf(stderr, "Unknown option `-%c'.\n",
                            optopt);
                }
                else {
                    fprintf(stderr,
                            "Unknown option character `\\x%x'.\n",
                            optopt);
                }
                exit(1);
            default:
                fprintf(stderr, "default case in getopt\n");
                exit(1);
        }
    }
    /* the sequence file is mandatory */
    if (filename) {
        parse_sequences(filename, &sequences, &sizes, &seq_count);
    }
    else {
        fprintf(stderr, "no filename specified\n");
        exit(1);
    }
    /* select the matrix; NULL means "test against all matrices" */
    if (matrixname) {
        matrix = parasail_matrix_lookup(matrixname);
        if (NULL == matrix) {
            fprintf(stderr, "Specified substitution matrix not found.\n");
            exit(1);
        }
    }
    /* every unordered pair of sequences is tested */
    limit = binomial_coefficient(seq_count, 2);
    printf("%lu choose 2 is %lu\n", seq_count, limit);
#if HAVE_SSE2
    if (parasail_can_use_sse2()) {
        if (test_scores) {
            check_functions(parasail_nw_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_sse2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_sse2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_sse2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif
#if HAVE_SSE41
    if (parasail_can_use_sse41()) {
        if (test_scores) {
            check_functions(parasail_nw_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_sse41, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_sse41, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_sse41, sequences, sizes, limit, matrix, gap);
        }
    }
#endif
#if HAVE_AVX2
    if (parasail_can_use_avx2()) {
        if (test_scores) {
            check_functions(parasail_nw_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_avx2, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_avx2, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_avx2, sequences, sizes, limit, matrix, gap);
        }
    }
#endif
#if HAVE_KNC
    {
        if (test_scores) {
            check_functions(parasail_nw_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_table_knc, sequences, sizes, limit, matrix, gap);
        }
        if (test_stats) {
            check_functions(parasail_nw_stats_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sg_stats_table_knc, sequences, sizes, limit, matrix, gap);
            check_functions(parasail_sw_stats_table_knc, sequences, sizes, limit, matrix, gap);
        }
    }
#endif
    /* dispatcher variants are always tested */
    if (test_scores) {
        check_functions(parasail_nw_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_table_disp, sequences, sizes, limit, matrix, gap);
    }
    if (test_stats) {
        check_functions(parasail_nw_stats_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sg_stats_table_disp, sequences, sizes, limit, matrix, gap);
        check_functions(parasail_sw_stats_table_disp, sequences, sizes, limit, matrix, gap);
    }
    for (i=0; i<seq_count; ++i) {
        free(sequences[i]);
    }
    free(sequences);
    free(sizes);
    return 0;
}
|
GB_unaryop__minv_uint64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_uint8
// op(A') function: GB_tran__minv_uint64_uint8
// C type: uint64_t
// A type: uint8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for all p: apply the MINV unary operator
// with a uint8_t -> uint64_t typecast to every entry of a dense array.
// Cx and Ax are restrict-qualified, so they must not overlap.
GrB_Info GB_unop__minv_uint64_uint8
(
uint64_t *restrict Cx,      // output array, anz entries
const uint8_t *restrict Ax, // input array, anz entries
int64_t anz,                // number of entries to process
int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = GB_IMINV_UNSIGNED ((uint64_t) Ax [p], 64)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A into C while applying the MINV unary
// operator and the uint8_t -> uint64_t typecast to each entry.  The
// loop body lives in the GB_unaryop_transpose.c template, which expands
// using the GB_* macros defined earlier in this file.
// NOTE(review): Rowcounts/Iter/A_slice/naslice appear to describe a
// slicing of A across parallel tasks — confirm against the template.
GrB_Info GB_tran__minv_uint64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
familytree_par.c | #include "familytree.h"
#include <omp.h>
int parallel_traverse(tree *node) {
    int iq_father;
    int iq_mother;

    /* Base case: an absent ancestor contributes an IQ of 0. */
    if (!node)
        return 0;

    /* Recurse into the father subtree as a child task while the
       encountering thread walks the mother subtree itself; taskwait
       joins both results before they are combined. */
    #pragma omp task shared(iq_father)
    iq_father = parallel_traverse(node->father);
    iq_mother = parallel_traverse(node->mother);
    #pragma omp taskwait

    node->IQ = compute_IQ(node->data, iq_father, iq_mother);
    genius[node->id] = node->IQ;   /* each node id is written exactly once */
    return node->IQ;
}
/*
 * Entry point: fork a team of numThreads threads; a single thread seeds
 * the task-based traversal and the rest of the team executes the tasks.
 *
 * Fix: guard against a NULL root — previously parallel_traverse(NULL)
 * returned 0 but the final `node->IQ` dereference below crashed.
 */
int traverse(tree *node, int numThreads){
    if (node == NULL) return 0;   /* empty tree, consistent with parallel_traverse */
    #pragma omp parallel num_threads(numThreads)
    {
        #pragma omp single
        parallel_traverse(node);
    }
    return node->IQ;
} |
arraybench.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include "common.h"
#include "arraybench.h"
double btest[IDA];
double atest[IDA];
#pragma omp threadprivate (btest)
/*
 * Driver for the array data-sharing benchmarks: times a bare reference
 * loop, then the overhead of each clause (private, firstprivate,
 * copyprivate, copyin) applied to an array of IDA doubles.
 *
 * Returns EXIT_SUCCESS after printing the benchmark results.
 */
int arraybench_main(int argc, char **argv) {
    char testName[32];

    ompbench_init(argc, argv);

    /* GENERATE REFERENCE TIME */
    reference("reference time 1", &refer);

    /* TEST PRIVATE */
    /* snprintf (not sprintf): bound all writes to sizeof(testName) so a
       large IDA value can never overrun the buffer. */
    snprintf(testName, sizeof(testName), "PRIVATE %d", IDA);
    benchmark(testName, &testprivnew);

    /* TEST FIRSTPRIVATE */
    snprintf(testName, sizeof(testName), "FIRSTPRIVATE %d", IDA);
    benchmark(testName, &testfirstprivnew);

#ifdef OMPVER2
    /* TEST COPYPRIVATE */
    snprintf(testName, sizeof(testName), "COPYPRIVATE %d", IDA);
    benchmark(testName, &testcopyprivnew);
#endif

#if 1
    /* TEST THREADPRIVATE - COPYIN */
    snprintf(testName, sizeof(testName), "COPYIN %d", IDA);
    benchmark(testName, &testthrprivnew);
#endif

    finalise();
    return EXIT_SUCCESS;
}
/* Baseline: time the bare delay kernel with no OpenMP construct around
   it, giving the reference the clause benchmarks are compared against. */
static void refer() {
    double scratch[1];
    int rep = 0;
    while (rep < innerreps) {
        array_delay(delaylength, scratch);
        ++rep;
    }
}
/* Overhead of entering a parallel region with firstprivate(atest):
   every thread gets its own copy of the global array, initialized from
   the original, on each region entry. */
void testfirstprivnew() {
    int rep = 0;
    while (rep < innerreps) {
#pragma omp parallel firstprivate(atest)
        {
            array_delay(delaylength, atest);
        }
        ++rep;
    }
}
/* Overhead of entering a parallel region with private(atest): every
   thread gets its own uninitialized copy of the global array on each
   region entry. */
void testprivnew() {
    int rep = 0;
    while (rep < innerreps) {
#pragma omp parallel private(atest)
        {
            array_delay(delaylength, atest);
        }
        ++rep;
    }
}
#ifdef OMPVER2
/* Overhead of single + copyprivate(atest): one thread touches its
   private copy of the array, then copyprivate broadcasts that copy to
   every other thread's private instance.  Only built for OpenMP 2.x+. */
void testcopyprivnew()
{
int j;
for (j=0; j<innerreps; j++) {
#pragma omp parallel private(atest)
{
/* exactly one thread runs this; the broadcast happens at its end */
#pragma omp single copyprivate(atest)
{
array_delay(delaylength, atest);
}
}
}
}
#endif
/* Overhead of copyin(btest): the threadprivate array (declared with
   `#pragma omp threadprivate (btest)` above) is refreshed from the
   master thread's copy at every parallel region entry. */
void testthrprivnew() {
    int rep = 0;
    while (rep < innerreps) {
#pragma omp parallel copyin(btest)
        {
            array_delay(delaylength, btest);
        }
        ++rep;
    }
}
|
lu.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU
This benchmark is an OpenMP C version of the NPB LU code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: S. Weeratunga
V. Venkatakrishnan
E. Barszcz
M. Yarrow
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "applu.h"
#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
#endif /* _OPENMP */
/* function declarations */
static void blts (int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
double omega,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
int ist, int iend,
int jst, int jend,
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
char *class, boolean *verified);
/*--------------------------------------------------------------------
program applu
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
/*--------------------------------------------------------------------
c
c driver for the performance evaluation of the solver for
c five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
char class;
boolean verified;
double mflops;
int nthreads = 1;
/*--------------------------------------------------------------------
c read input data
--------------------------------------------------------------------*/
read_input();
/*--------------------------------------------------------------------
c set up domain sizes
--------------------------------------------------------------------*/
domain();
/*--------------------------------------------------------------------
c set up coefficients
--------------------------------------------------------------------*/
setcoeff();
/*--------------------------------------------------------------------
c set the boundary values for dependent variables
--------------------------------------------------------------------*/
setbv();
/*--------------------------------------------------------------------
c set the initial values for dependent variables
--------------------------------------------------------------------*/
setiv();
/*--------------------------------------------------------------------
c compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
erhs();
/* query the actual team size once; the implicit barrier at the end of
   the region makes nthreads safe to read afterwards */
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
}
/*--------------------------------------------------------------------
c perform the SSOR iterations
--------------------------------------------------------------------*/
ssor();
/*--------------------------------------------------------------------
c compute the solution error
--------------------------------------------------------------------*/
error();
/*--------------------------------------------------------------------
c compute the surface integral
--------------------------------------------------------------------*/
pintgr();
/*--------------------------------------------------------------------
c verification test
--------------------------------------------------------------------*/
verify ( rsdnm, errnm, frc, &class, &verified );
mflops = (double)itmax*(1984.77*(double)nx0
*(double)ny0
*(double)nz0
-10923.3*pow2((double)( nx0+ny0+nz0 )/3.0)
+27770.9* (double)( nx0+ny0+nz0 )/3.0
-144010.0)
/ (maxtime*1000000.0);
c_print_results("LU", class, nx0,
ny0, nz0, itmax, nthreads,
maxtime, mflops, " floating point", verified,
NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
"(none)");
/* FIX: main is declared int but previously fell off the end, leaving
   the exit status unspecified under C89; return success explicitly. */
return 0;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Forward (lower-triangular) sweep of the SSOR solver for one k-plane.
   Step 1 subtracts the k-1 plane contribution; step 2 is a software
   pipeline over i in which each thread busy-waits on flag[] until the
   thread owning row i-1 has published its update, then performs a
   hand-unrolled 5x5 Gaussian elimination per (i,j) point.
   NOTE(review): the pipeline relies on both `omp for nowait
   schedule(static)` loops assigning identical i-ranges to each thread
   — confirm before changing either schedule. */
static void blts (int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double ldz[ISIZ1][ISIZ2][5][5],
double ldy[ISIZ1][ISIZ2][5][5],
double ldx[ISIZ1][ISIZ2][5][5],
double d[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block lower triangular solution:
c
c v <-- ( L-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
/* step 1: subtract the ldz (k-1 plane) contribution from v */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldz[i][j][m][0] * v[i][j][k-1][0]
+ ldz[i][j][m][1] * v[i][j][k-1][1]
+ ldz[i][j][m][2] * v[i][j][k-1][2]
+ ldz[i][j][m][3] * v[i][j][k-1][3]
+ ldz[i][j][m][4] * v[i][j][k-1][4] );
}
}
}
/* step 2: pipelined sweep over i using the i-1 and j-1 neighbors */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
#if defined(_OPENMP)
/* wait until row i-1 has been updated by its owning thread */
if (i != ist) {
while (flag[i-1] == 0) {
#pragma omp flush(flag)
;
}
}
/* wait until the consumer of row i has drained the previous value */
if (i != iend) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jst; j <= jend; j++) {
/* subtract the ldy (j-1) and ldx (i-1) neighbor contributions */
for (m = 0; m < 5; m++) {
v[i][j][k][m] = v[i][j][k][m]
- omega * ( ldy[i][j][m][0] * v[i][j-1][k][0]
+ ldx[i][j][m][0] * v[i-1][j][k][0]
+ ldy[i][j][m][1] * v[i][j-1][k][1]
+ ldx[i][j][m][1] * v[i-1][j][k][1]
+ ldy[i][j][m][2] * v[i][j-1][k][2]
+ ldx[i][j][m][2] * v[i-1][j][k][2]
+ ldy[i][j][m][3] * v[i][j-1][k][3]
+ ldx[i][j][m][3] * v[i-1][j][k][3]
+ ldy[i][j][m][4] * v[i][j-1][k][4]
+ ldx[i][j][m][4] * v[i-1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
c
c forward elimination
--------------------------------------------------------------------*/
/* copy the 5x5 diagonal block; tmat is destroyed by elimination */
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
/* eliminate column 0 (pivot tmat[0][0]), updating v alongside */
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
v[i][j][k][1] = v[i][j][k][1]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][0] * tmp;
/* eliminate column 1 (pivot tmat[1][1]) */
tmp1 = 1.0 / tmat[ 1][1];
tmp = tmp1 * tmat[ 2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
v[i][j][k][2] = v[i][j][k][2]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][1] * tmp;
/* eliminate column 2 (pivot tmat[2][2]) */
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
v[i][j][k][3] = v[i][j][k][3]
- v[i][j][k][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][2] * tmp;
/* eliminate column 3 (pivot tmat[3][3]) */
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
v[i][j][k][4] = v[i][j][k][4]
- v[i][j][k][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
v[i][j][k][4] = v[i][j][k][4]
/ tmat[4][4];
v[i][j][k][3] = v[i][j][k][3]
- tmat[3][4] * v[i][j][k][4];
v[i][j][k][3] = v[i][j][k][3]
/ tmat[3][3];
v[i][j][k][2] = v[i][j][k][2]
- tmat[2][3] * v[i][j][k][3]
- tmat[2][4] * v[i][j][k][4];
v[i][j][k][2] = v[i][j][k][2]
/ tmat[2][2];
v[i][j][k][1] = v[i][j][k][1]
- tmat[1][2] * v[i][j][k][2]
- tmat[1][3] * v[i][j][k][3]
- tmat[1][4] * v[i][j][k][4];
v[i][j][k][1] = v[i][j][k][1]
/ tmat[1][1];
v[i][j][k][0] = v[i][j][k][0]
- tmat[0][1] * v[i][j][k][1]
- tmat[0][2] * v[i][j][k][2]
- tmat[0][3] * v[i][j][k][3]
- tmat[0][4] * v[i][j][k][4];
v[i][j][k][0] = v[i][j][k][0]
/ tmat[0][0];
}
#if defined(_OPENMP)
/* publish: row i is done; the flag for row i-1 can be reused */
if (i != ist) flag[i-1] = 0;
if (i != iend) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Backward (upper-triangular) sweep of the SSOR solver for one k-plane;
   mirror image of blts, iterating i and j in descending order.  Step 1
   accumulates the k+1 plane contribution into the scratch array tv;
   step 2 is the pipelined sweep (waiting on flag[i+1] instead of
   flag[i-1]) followed by the hand-unrolled 5x5 elimination, and finally
   v is corrected by the solved tv.
   NOTE(review): like blts, this depends on matching static schedules
   between the two `omp for nowait` loops. */
static void buts(int nx, int ny, int nz, int k,
double omega,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
double tv[ISIZ1][ISIZ2][5],
double d[ISIZ1][ISIZ2][5][5],
double udx[ISIZ1][ISIZ2][5][5],
double udy[ISIZ1][ISIZ2][5][5],
double udz[ISIZ1][ISIZ2][5][5],
int ist, int iend, int jst, int jend,
int nx0, int ny0 ) {
/*--------------------------------------------------------------------
c
c compute the regular-sparse, block upper triangular solution:
c
c v <-- ( U-inv ) * v
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, m;
double tmp, tmp1;
double tmat[5][5];
/* step 1: accumulate the udz (k+1 plane) contribution into tv */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
for (m = 0; m < 5; m++) {
tv[i][j][m] =
omega * ( udz[i][j][m][0] * v[i][j][k+1][0]
+ udz[i][j][m][1] * v[i][j][k+1][1]
+ udz[i][j][m][2] * v[i][j][k+1][2]
+ udz[i][j][m][3] * v[i][j][k+1][3]
+ udz[i][j][m][4] * v[i][j][k+1][4] );
}
}
}
/* step 2: pipelined reverse sweep over i using the i+1 and j+1 neighbors */
#pragma omp for nowait schedule(static)
for (i = iend; i >= ist; i--) {
#if defined(_OPENMP)
/* wait until row i+1 has been updated by its owning thread */
if (i != iend) {
while (flag[i+1] == 0) {
#pragma omp flush(flag)
;
}
}
/* wait until the consumer of row i has drained the previous value */
if (i != ist) {
while (flag[i] == 1) {
#pragma omp flush(flag)
;
}
}
#endif /* _OPENMP */
for (j = jend; j >= jst; j--) {
/* add the udy (j+1) and udx (i+1) neighbor contributions */
for (m = 0; m < 5; m++) {
tv[i][j][m] = tv[i][j][m]
+ omega * ( udy[i][j][m][0] * v[i][j+1][k][0]
+ udx[i][j][m][0] * v[i+1][j][k][0]
+ udy[i][j][m][1] * v[i][j+1][k][1]
+ udx[i][j][m][1] * v[i+1][j][k][1]
+ udy[i][j][m][2] * v[i][j+1][k][2]
+ udx[i][j][m][2] * v[i+1][j][k][2]
+ udy[i][j][m][3] * v[i][j+1][k][3]
+ udx[i][j][m][3] * v[i+1][j][k][3]
+ udy[i][j][m][4] * v[i][j+1][k][4]
+ udx[i][j][m][4] * v[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c diagonal block inversion
--------------------------------------------------------------------*/
/* copy the 5x5 diagonal block; tmat is destroyed by elimination */
for (m = 0; m < 5; m++) {
tmat[m][0] = d[i][j][m][0];
tmat[m][1] = d[i][j][m][1];
tmat[m][2] = d[i][j][m][2];
tmat[m][3] = d[i][j][m][3];
tmat[m][4] = d[i][j][m][4];
}
/* eliminate column 0 (pivot tmat[0][0]), updating tv alongside */
tmp1 = 1.0 / tmat[0][0];
tmp = tmp1 * tmat[1][0];
tmat[1][1] = tmat[1][1]
- tmp * tmat[0][1];
tmat[1][2] = tmat[1][2]
- tmp * tmat[0][2];
tmat[1][3] = tmat[1][3]
- tmp * tmat[0][3];
tmat[1][4] = tmat[1][4]
- tmp * tmat[0][4];
tv[i][j][1] = tv[i][j][1]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[2][0];
tmat[2][1] = tmat[2][1]
- tmp * tmat[0][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[0][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[0][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[0][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[3][0];
tmat[3][1] = tmat[3][1]
- tmp * tmat[0][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[0][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[0][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[0][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][0] * tmp;
tmp = tmp1 * tmat[4][0];
tmat[4][1] = tmat[4][1]
- tmp * tmat[0][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[0][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[0][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[0][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][0] * tmp;
/* eliminate column 1 (pivot tmat[1][1]) */
tmp1 = 1.0 / tmat[1][1];
tmp = tmp1 * tmat[2][1];
tmat[2][2] = tmat[2][2]
- tmp * tmat[1][2];
tmat[2][3] = tmat[2][3]
- tmp * tmat[1][3];
tmat[2][4] = tmat[2][4]
- tmp * tmat[1][4];
tv[i][j][2] = tv[i][j][2]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[3][1];
tmat[3][2] = tmat[3][2]
- tmp * tmat[1][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[1][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[1][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][1] * tmp;
tmp = tmp1 * tmat[4][1];
tmat[4][2] = tmat[4][2]
- tmp * tmat[1][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[1][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[1][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][1] * tmp;
/* eliminate column 2 (pivot tmat[2][2]) */
tmp1 = 1.0 / tmat[2][2];
tmp = tmp1 * tmat[3][2];
tmat[3][3] = tmat[3][3]
- tmp * tmat[2][3];
tmat[3][4] = tmat[3][4]
- tmp * tmat[2][4];
tv[i][j][3] = tv[i][j][3]
- tv[i][j][2] * tmp;
tmp = tmp1 * tmat[4][2];
tmat[4][3] = tmat[4][3]
- tmp * tmat[2][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[2][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][2] * tmp;
/* eliminate column 3 (pivot tmat[3][3]) */
tmp1 = 1.0 / tmat[3][3];
tmp = tmp1 * tmat[4][3];
tmat[4][4] = tmat[4][4]
- tmp * tmat[3][4];
tv[i][j][4] = tv[i][j][4]
- tv[i][j][3] * tmp;
/*--------------------------------------------------------------------
c back substitution
--------------------------------------------------------------------*/
tv[i][j][4] = tv[i][j][4]
/ tmat[4][4];
tv[i][j][3] = tv[i][j][3]
- tmat[3][4] * tv[i][j][4];
tv[i][j][3] = tv[i][j][3]
/ tmat[3][3];
tv[i][j][2] = tv[i][j][2]
- tmat[2][3] * tv[i][j][3]
- tmat[2][4] * tv[i][j][4];
tv[i][j][2] = tv[i][j][2]
/ tmat[2][2];
tv[i][j][1] = tv[i][j][1]
- tmat[1][2] * tv[i][j][2]
- tmat[1][3] * tv[i][j][3]
- tmat[1][4] * tv[i][j][4];
tv[i][j][1] = tv[i][j][1]
/ tmat[1][1];
tv[i][j][0] = tv[i][j][0]
- tmat[0][1] * tv[i][j][1]
- tmat[0][2] * tv[i][j][2]
- tmat[0][3] * tv[i][j][3]
- tmat[0][4] * tv[i][j][4];
tv[i][j][0] = tv[i][j][0]
/ tmat[0][0];
/* apply the solved correction to v */
v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0];
v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1];
v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2];
v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3];
v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4];
}
#if defined(_OPENMP)
/* publish: row i is done; the flag for row i+1 can be reused */
if (i != iend) flag[i+1] = 0;
if (i != ist) flag[i] = 1;
#pragma omp flush(flag)
#endif /* _OPENMP */
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Configure the working grid from the global problem size (nx0/ny0/nz0)
   and derive the interior i/j iteration bounds shared by the solver. */
static void domain(void) {
    nz = nz0;
    ny = ny0;
    nx = nx0;

    /* The stencils need at least 4 points in every direction. */
    if ( nx < 4 || ny < 4 || nz < 4 ) {
        printf(" SUBDOMAIN SIZE IS TOO SMALL - \n"
               " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
               " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n"
               " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz);
        exit(1);
    }

    /* ...and must fit within the statically dimensioned arrays. */
    if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) {
        printf(" SUBDOMAIN SIZE IS TOO LARGE - \n"
               " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n"
               " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n"
               " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n"
               " CURRENTLY%4d%4d%4d\n", nx, ny, nz);
        exit(1);
    }

    /* Interior index ranges: first/last grid lines are boundaries. */
    jst = 1;
    jend = ny - 2;
    ist = 1;
    iend = nx - 2;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void erhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c
c compute the right hand side based on exact solution
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
int L1, L2;
int ist1, iend1;
int jst1, jend1;
double dsspm;
double xi, eta, zeta;
double q;
double u21, u31, u41;
double tmp;
double u21i, u31i, u41i, u51i;
double u21j, u31j, u41j, u51j;
double u21k, u31k, u41k, u51k;
double u21im1, u31im1, u41im1, u51im1;
double u21jm1, u31jm1, u41jm1, u51jm1;
double u21km1, u31km1, u41km1, u51km1;
dsspm = dssp;
#pragma omp for
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
for (k = 0; k < nz; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = 0.0;
}
}
}
}
#pragma omp for
for (i = 0; i < nx; i++) {
iglob = i;
xi = ( (double)(iglob) ) / ( nx0 - 1 );
for (j = 0; j < ny; j++) {
jglob = j;
eta = ( (double)(jglob) ) / ( ny0 - 1 );
for (k = 0; k < nz; k++) {
zeta = ( (double)(k) ) / ( nz - 1 );
for (m = 0; m < 5; m++) {
rsd[i][j][k][m] = ce[m][0]
+ ce[m][1] * xi
+ ce[m][2] * eta
+ ce[m][3] * zeta
+ ce[m][4] * xi * xi
+ ce[m][5] * eta * eta
+ ce[m][6] * zeta * zeta
+ ce[m][7] * xi * xi * xi
+ ce[m][8] * eta * eta * eta
+ ce[m][9] * zeta * zeta * zeta
+ ce[m][10] * xi * xi * xi * xi
+ ce[m][11] * eta * eta * eta * eta
+ ce[m][12] * zeta * zeta * zeta * zeta;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = nx-1;
#pragma omp for
for (i = L1; i <= L2; i++) {
for (j = jst; j <= jend; j++) {
for (k = 1; k < nz - 1; k++) {
flux[i][j][k][0] = rsd[i][j][k][1];
u21 = rsd[i][j][k][1] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][2] = rsd[i][j][k][2] * u21;
flux[i][j][k][3] = rsd[i][j][k][3] * u21;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21;
}
}
}
#pragma omp for
for (j = jst; j <= jend; j++) {
for (k = 1; k <= nz - 2; k++) {
for (i = ist; i <= iend; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
}
}
for (i = ist; i <= L2; i++) {
tmp = 1.0 / rsd[i][j][k][0];
u21i = tmp * rsd[i][j][k][1];
u31i = tmp * rsd[i][j][k][2];
u41i = tmp * rsd[i][j][k][3];
u51i = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i-1][j][k][0];
u21im1 = tmp * rsd[i-1][j][k][1];
u31im1 = tmp * rsd[i-1][j][k][2];
u41im1 = tmp * rsd[i-1][j][k][3];
u51im1 = tmp * rsd[i-1][j][k][4];
flux[i][j][k][1] = (4.0/3.0) * tx3 *
( u21i - u21im1 );
flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i )
- ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) )
+ (1.0/6.0)
* tx3 * ( u21i*u21i - u21im1*u21im1 )
+ C1 * C5 * tx3 * ( u51i - u51im1 );
}
for (i = ist; i <= iend; i++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dx1 * tx1 * ( rsd[i-1][j][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i+1][j][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
+ dx2 * tx1 * ( rsd[i-1][j][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i+1][j][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
+ dx3 * tx1 * ( rsd[i-1][j][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i+1][j][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
+ dx4 * tx1 * ( rsd[i-1][j][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i+1][j][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
+ dx5 * tx1 * ( rsd[i-1][j][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i+1][j][k][4] );
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[1][j][k][m] = frct[1][j][k][m]
- dsspm * ( + 5.0 * rsd[1][j][k][m]
- 4.0 * rsd[2][j][k][m]
+ rsd[3][j][k][m] );
frct[2][j][k][m] = frct[2][j][k][m]
- dsspm * ( - 4.0 * rsd[1][j][k][m]
+ 6.0 * rsd[2][j][k][m]
- 4.0 * rsd[3][j][k][m]
+ rsd[4][j][k][m] );
}
ist1 = 3;
iend1 = nx - 4;
for (i = ist1; i <=iend1; i++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i-2][j][k][m]
- 4.0 * rsd[i-1][j][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i+1][j][k][m]
+ rsd[i+2][j][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[nx-3][j][k][m] = frct[nx-3][j][k][m]
- dsspm * ( rsd[nx-5][j][k][m]
- 4.0 * rsd[nx-4][j][k][m]
+ 6.0 * rsd[nx-3][j][k][m]
- 4.0 * rsd[nx-2][j][k][m] );
frct[nx-2][j][k][m] = frct[nx-2][j][k][m]
- dsspm * ( rsd[nx-4][j][k][m]
- 4.0 * rsd[nx-3][j][k][m]
+ 5.0 * rsd[nx-2][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
--------------------------------------------------------------------*/
L1 = 0;
L2 = ny-1;
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = L1; j <= L2; j++) {
for (k = 1; k <= nz - 2; k++) {
flux[i][j][k][0] = rsd[i][j][k][2];
u31 = rsd[i][j][k][2] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u31;
flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][3] = rsd[i][j][k][3] * u31;
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31;
}
}
}
#pragma omp for
for (i = ist; i <= iend; i++) {
for (k = 1; k <= nz - 2; k++) {
for (j = jst; j <= jend; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
}
}
for (j = jst; j <= L2; j++) {
tmp = 1.0 / rsd[i][j][k][0];
u21j = tmp * rsd[i][j][k][1];
u31j = tmp * rsd[i][j][k][2];
u41j = tmp * rsd[i][j][k][3];
u51j = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j-1][k][0];
u21jm1 = tmp * rsd[i][j-1][k][1];
u31jm1 = tmp * rsd[i][j-1][k][2];
u41jm1 = tmp * rsd[i][j-1][k][3];
u51jm1 = tmp * rsd[i][j-1][k][4];
flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
flux[i][j][k][2] = (4.0/3.0) * ty3 *
( u31j - u31jm1 );
flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j )
- ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) )
+ (1.0/6.0)
* ty3 * ( u31j*u31j - u31jm1*u31jm1 )
+ C1 * C5 * ty3 * ( u51j - u51jm1 );
}
for (j = jst; j <= jend; j++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dy1 * ty1 * ( rsd[i][j-1][k][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j+1][k][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
+ dy2 * ty1 * ( rsd[i][j-1][k][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j+1][k][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
+ dy3 * ty1 * ( rsd[i][j-1][k][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j+1][k][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
+ dy4 * ty1 * ( rsd[i][j-1][k][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j+1][k][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
+ dy5 * ty1 * ( rsd[i][j-1][k][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j+1][k][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][1][k][m] = frct[i][1][k][m]
- dsspm * ( + 5.0 * rsd[i][1][k][m]
- 4.0 * rsd[i][2][k][m]
+ rsd[i][3][k][m] );
frct[i][2][k][m] = frct[i][2][k][m]
- dsspm * ( - 4.0 * rsd[i][1][k][m]
+ 6.0 * rsd[i][2][k][m]
- 4.0 * rsd[i][3][k][m]
+ rsd[i][4][k][m] );
}
jst1 = 3;
jend1 = ny - 4;
for (j = jst1; j <= jend1; j++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j-2][k][m]
- 4.0 * rsd[i][j-1][k][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j+1][k][m]
+ rsd[i][j+2][k][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][ny-3][k][m] = frct[i][ny-3][k][m]
- dsspm * ( rsd[i][ny-5][k][m]
- 4.0 * rsd[i][ny-4][k][m]
+ 6.0 * rsd[i][ny-3][k][m]
- 4.0 * rsd[i][ny-2][k][m] );
frct[i][ny-2][k][m] = frct[i][ny-2][k][m]
- dsspm * ( rsd[i][ny-4][k][m]
- 4.0 * rsd[i][ny-3][k][m]
+ 5.0 * rsd[i][ny-2][k][m] );
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
--------------------------------------------------------------------*/
#pragma omp for
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
for (k = 0; k <= nz-1; k++) {
flux[i][j][k][0] = rsd[i][j][k][3];
u41 = rsd[i][j][k][3] / rsd[i][j][k][0];
q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1]
+ rsd[i][j][k][2] * rsd[i][j][k][2]
+ rsd[i][j][k][3] * rsd[i][j][k][3] )
/ rsd[i][j][k][0];
flux[i][j][k][1] = rsd[i][j][k][1] * u41;
flux[i][j][k][2] = rsd[i][j][k][2] * u41;
flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 *
( rsd[i][j][k][4] - q );
flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41;
}
for (k = 1; k <= nz - 2; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
}
}
for (k = 1; k <= nz-1; k++) {
tmp = 1.0 / rsd[i][j][k][0];
u21k = tmp * rsd[i][j][k][1];
u31k = tmp * rsd[i][j][k][2];
u41k = tmp * rsd[i][j][k][3];
u51k = tmp * rsd[i][j][k][4];
tmp = 1.0 / rsd[i][j][k-1][0];
u21km1 = tmp * rsd[i][j][k-1][1];
u31km1 = tmp * rsd[i][j][k-1][2];
u41km1 = tmp * rsd[i][j][k-1][3];
u51km1 = tmp * rsd[i][j][k-1][4];
flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k
- u41km1 );
flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
* tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k )
- ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) )
+ (1.0/6.0)
* tz3 * ( u41k*u41k - u41km1*u41km1 )
+ C1 * C5 * tz3 * ( u51k - u51km1 );
}
for (k = 1; k <= nz - 2; k++) {
frct[i][j][k][0] = frct[i][j][k][0]
+ dz1 * tz1 * ( rsd[i][j][k+1][0]
- 2.0 * rsd[i][j][k][0]
+ rsd[i][j][k-1][0] );
frct[i][j][k][1] = frct[i][j][k][1]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
+ dz2 * tz1 * ( rsd[i][j][k+1][1]
- 2.0 * rsd[i][j][k][1]
+ rsd[i][j][k-1][1] );
frct[i][j][k][2] = frct[i][j][k][2]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
+ dz3 * tz1 * ( rsd[i][j][k+1][2]
- 2.0 * rsd[i][j][k][2]
+ rsd[i][j][k-1][2] );
frct[i][j][k][3] = frct[i][j][k][3]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
+ dz4 * tz1 * ( rsd[i][j][k+1][3]
- 2.0 * rsd[i][j][k][3]
+ rsd[i][j][k-1][3] );
frct[i][j][k][4] = frct[i][j][k][4]
+ tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
+ dz5 * tz1 * ( rsd[i][j][k+1][4]
- 2.0 * rsd[i][j][k][4]
+ rsd[i][j][k-1][4] );
}
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
frct[i][j][1][m] = frct[i][j][1][m]
- dsspm * ( + 5.0 * rsd[i][j][1][m]
- 4.0 * rsd[i][j][2][m]
+ rsd[i][j][3][m] );
frct[i][j][2][m] = frct[i][j][2][m]
- dsspm * (- 4.0 * rsd[i][j][1][m]
+ 6.0 * rsd[i][j][2][m]
- 4.0 * rsd[i][j][3][m]
+ rsd[i][j][4][m] );
}
for (k = 3; k <= nz - 4; k++) {
for (m = 0; m < 5; m++) {
frct[i][j][k][m] = frct[i][j][k][m]
- dsspm * ( rsd[i][j][k-2][m]
- 4.0 * rsd[i][j][k-1][m]
+ 6.0 * rsd[i][j][k][m]
- 4.0 * rsd[i][j][k+1][m]
+ rsd[i][j][k+2][m] );
}
}
for (m = 0; m < 5; m++) {
frct[i][j][nz-3][m] = frct[i][j][nz-3][m]
- dsspm * ( rsd[i][j][nz-5][m]
- 4.0 * rsd[i][j][nz-4][m]
+ 6.0 * rsd[i][j][nz-3][m]
- 4.0 * rsd[i][j][nz-2][m] );
frct[i][j][nz-2][m] = frct[i][j][nz-2][m]
- dsspm * ( rsd[i][j][nz-4][m]
- 4.0 * rsd[i][j][nz-3][m]
+ 5.0 * rsd[i][j][nz-2][m] );
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error(void) {
/*--------------------------------------------------------------------
c
c compute the solution error
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
int i, j, k, m;
int iglob, jglob;
double tmp;
double u000ijk[5];
for (m = 0; m < 5; m++) {
errnm[m] = 0.0;
}
for (i = ist; i <= iend; i++) {
iglob = i;
for (j = jst; j <= jend; j++) {
jglob = j;
for (k = 1; k <= nz-2; k++) {
exact( iglob, jglob, k, u000ijk );
for (m = 0; m < 5; m++) {
tmp = ( u000ijk[m] - u[i][j][k][m] );
errnm[m] = errnm[m] + tmp *tmp;
}
}
}
}
for (m = 0; m < 5; m++) {
errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact( int i, int j, int k, double u000ijk[5] ) {
/*--------------------------------------------------------------------
c
c   compute the exact solution at grid point (i,j,k):
c   a degree-4 polynomial in the normalized coordinates
c   xi = i/(nx0-1), eta = j/(ny0-1), zeta = k/(nz-1) with
c   coefficients taken from the global table ce[m][0..12].
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int m, p;
  double xi, eta, zeta;
  double xp[5], ep[5], zp[5];   /* powers 0..4 of xi, eta, zeta */
  double acc;

  xi   = ((double)i) / (nx0 - 1);
  eta  = ((double)j) / (ny0 - 1);
  zeta = ((double)k) / (nz - 1);

  /* precompute the powers once; the repeated products associate
     left-to-right exactly as the original inline xi*xi*... chains */
  xp[0] = 1.0;  ep[0] = 1.0;  zp[0] = 1.0;
  for (p = 1; p <= 4; p++) {
    xp[p] = xp[p-1] * xi;
    ep[p] = ep[p-1] * eta;
    zp[p] = zp[p-1] * zeta;
  }

  /* ce columns are grouped per power: (xi^p, eta^p, zeta^p) occupy
     columns 3p-2, 3p-1, 3p; accumulation order matches the original
     left-associated sum term by term */
  for (m = 0; m < 5; m++) {
    acc = ce[m][0];
    for (p = 1; p <= 4; p++) {
      acc = acc + ce[m][3*p - 2] * xp[p];
      acc = acc + ce[m][3*p - 1] * ep[p];
      acc = acc + ce[m][3*p    ] * zp[p];
    }
    u000ijk[m] = acc;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c   compute the lower triangular part of the jacobian matrix for the
c   k-th zeta plane.
c
c   For each interior point (i,j) of plane k, four 5x5 blocks are
c   filled (block row of the lower-triangular system):
c     d[i][j][..][..] : block diagonal, built from u[i][j][k]
c     a[i][j][..][..] : off-diagonal block built from u[i][j][k-1]
c     b[i][j][..][..] : off-diagonal block built from u[i][j-1][k]
c     c[i][j][..][..] : off-diagonal block built from u[i-1][j][k]
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
/* frequently reused constant combinations */
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
/* NOTE(review): nowait + schedule(static) -- presumably the consumer
   loop uses the same static schedule so per-thread iteration ranges
   line up (pipelined SSOR); confirm in the caller before changing. */
#pragma omp for nowait schedule(static)
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
/*--------------------------------------------------------------------
c   form the block diagonal (coefficients of u[i][j][k])
--------------------------------------------------------------------*/
/* tmp1..tmp3 = 1/rho, 1/rho^2, 1/rho^3 at the centre point */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c   form the first block sub-diagonal (built from the k-1 neighbour)
--------------------------------------------------------------------*/
/* tmp1..tmp3 now refer to 1/rho powers at (i,j,k-1) */
tmp1 = 1.0 / u[i][j][k-1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tz1 * dz1;
a[i][j][0][1] = 0.0;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = - dt * tz2;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = - dt * tz2
* ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] );
a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
a[i][j][1][2] = 0.0;
a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 );
a[i][j][1][4] = 0.0;
a[i][j][2][0] = - dt * tz2
* ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] );
a[i][j][2][1] = 0.0;
a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 );
a[i][j][2][4] = 0.0;
a[i][j][3][0] = - dt * tz2
* ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] );
a[i][j][3][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1] * tmp1 ) );
a[i][j][3][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2] * tmp1 ) );
a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k-1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
a[i][j][3][4] = - dt * tz2 * C2;
a[i][j][4][0] = - dt * tz2
* ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1]
+ u[i][j][k-1][2] * u[i][j][k-1][2]
+ u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2
- C1 * ( u[i][j][k-1][4] * tmp1 ) )
* ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1])
- ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2])
- ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3])
- c1345 * tmp2 * u[i][j][k-1][4] );
a[i][j][4][1] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1];
a[i][j][4][2] = - dt * tz2
* ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2];
a[i][j][4][3] = - dt * tz2
* ( C1 * ( u[i][j][k-1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k-1][1]*u[i][j][k-1][1]
+ u[i][j][k-1][2]*u[i][j][k-1][2]
+ 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3];
a[i][j][4][4] = - dt * tz2
* ( C1 * ( u[i][j][k-1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
/*--------------------------------------------------------------------
c   form the second block sub-diagonal (built from the j-1 neighbour)
--------------------------------------------------------------------*/
/* tmp1..tmp3 now refer to 1/rho powers at (i,j-1,k) */
tmp1 = 1.0 / u[i][j-1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = - dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = - dt * ty2
* ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] );
b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = - dt * ty2
* ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] );
b[i][j][2][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1] * tmp1 ) );
b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][3] * tmp1 ) );
b[i][j][2][4] = - dt * ty2 * C2;
b[i][j][3][0] = - dt * ty2
* ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 );
b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = - dt * ty2
* ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1]
+ u[i][j-1][k][2] * u[i][j-1][k][2]
+ u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2
- C1 * ( u[i][j-1][k][4] * tmp1 ) )
* ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1]))
- ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2]))
- ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3]))
- c1345*tmp2*u[i][j-1][k][4] );
b[i][j][4][1] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1];
b[i][j][4][2] = - dt * ty2
* ( C1 * ( u[i][j-1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j-1][k][1]*u[i][j-1][k][1]
+ 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2]
+ u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2];
b[i][j][4][3] = - dt * ty2
* ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3];
b[i][j][4][4] = - dt * ty2
* ( C1 * ( u[i][j-1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c   form the third block sub-diagonal (built from the i-1 neighbour)
--------------------------------------------------------------------*/
/* tmp1..tmp3 now refer to 1/rho powers at (i-1,j,k) */
tmp1 = 1.0 / u[i-1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tx1 * dx1;
c[i][j][0][1] = - dt * tx2;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = 0.0;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] );
c[i][j][1][1] = - dt * tx2
* ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
c[i][j][1][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2] * tmp1 ) );
c[i][j][1][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3] * tmp1 ) );
c[i][j][1][4] = - dt * tx2 * C2;
c[i][j][2][0] = - dt * tx2
* ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] );
c[i][j][2][1] = - dt * tx2 * ( u[i-1][j][k][2] * tmp1 );
c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
c[i][j][2][3] = 0.0;
c[i][j][2][4] = 0.0;
c[i][j][3][0] = - dt * tx2
* ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] );
c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 );
c[i][j][3][2] = 0.0;
c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
c[i][j][3][4] = 0.0;
c[i][j][4][0] = - dt * tx2
* ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1]
+ u[i-1][j][k][2] * u[i-1][j][k][2]
+ u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2
- C1 * ( u[i-1][j][k][4] * tmp1 ) )
* ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) )
- c1345 * tmp2 * u[i-1][j][k][4] );
c[i][j][4][1] = - dt * tx2
* ( C1 * ( u[i-1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1]
+ u[i-1][j][k][2]*u[i-1][j][k][2]
+ u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1];
c[i][j][4][2] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2];
c[i][j][4][3] = - dt * tx2
* ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3];
c[i][j][4][4] = - dt * tx2
* ( C1 * ( u[i-1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k) {
/*--------------------------------------------------------------------
c   compute the upper triangular part of the jacobian matrix for the
c   k-th zeta plane.
c
c   For each interior point (i,j) of plane k, four 5x5 blocks are
c   filled (block row of the upper-triangular system):
c     d[i][j][..][..] : block diagonal, built from u[i][j][k]
c     a[i][j][..][..] : off-diagonal block built from u[i+1][j][k]
c     b[i][j][..][..] : off-diagonal block built from u[i][j+1][k]
c     c[i][j][..][..] : off-diagonal block built from u[i][j][k+1]
c   (the section comments below keep the original "sub-diagonal"
c   wording even though these blocks couple to the +1 neighbours)
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i, j;
double r43;
double c1345;
double c34;
double tmp1, tmp2, tmp3;
/* frequently reused constant combinations */
r43 = ( 4.0 / 3.0 );
c1345 = C1 * C3 * C4 * C5;
c34 = C3 * C4;
/* NOTE(review): nowait + schedule(static) -- presumably matched with
   the consumer loop's static schedule (pipelined SSOR); confirm in
   the caller before changing.  OpenMP builds sweep i,j in DESCENDING
   order (backward substitution direction). */
#pragma omp for nowait schedule(static)
#if defined(_OPENMP)
for (i = iend; i >= ist; i--) {
for (j = jend; j >= jst; j--) {
#else
for (i = ist; i <= iend; i++) {
for (j = jst; j <= jend; j++) {
#endif
/*--------------------------------------------------------------------
c   form the block diagonal (coefficients of u[i][j][k])
--------------------------------------------------------------------*/
/* tmp1..tmp3 = 1/rho, 1/rho^2, 1/rho^3 at the centre point */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
d[i][j][0][0] = 1.0
+ dt * 2.0 * ( tx1 * dx1
+ ty1 * dy1
+ tz1 * dz1 );
d[i][j][0][1] = 0.0;
d[i][j][0][2] = 0.0;
d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0;
d[i][j][1][0] = dt * 2.0
* ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][1] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) );
d[i][j][1][1] = 1.0
+ dt * 2.0
* ( tx1 * r43 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx2
+ ty1 * dy2
+ tz1 * dz2 );
d[i][j][1][2] = 0.0;
d[i][j][1][3] = 0.0;
d[i][j][1][4] = 0.0;
d[i][j][2][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] )
+ ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] )
+ tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) );
d[i][j][2][1] = 0.0;
d[i][j][2][2] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * r43 * c34 * tmp1
+ tz1 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx3
+ ty1 * dy3
+ tz1 * dz3 );
d[i][j][2][3] = 0.0;
d[i][j][2][4] = 0.0;
d[i][j][3][0] = dt * 2.0
* ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ ty1 * ( - c34 * tmp2 * u[i][j][k][3] )
+ tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) );
d[i][j][3][1] = 0.0;
d[i][j][3][2] = 0.0;
d[i][j][3][3] = 1.0
+ dt * 2.0
* ( tx1 * c34 * tmp1
+ ty1 * c34 * tmp1
+ tz1 * r43 * c34 * tmp1 )
+ dt * 2.0 * ( tx1 * dx4
+ ty1 * dy4
+ tz1 * dz4 );
d[i][j][3][4] = 0.0;
d[i][j][4][0] = dt * 2.0
* ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] )
+ tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) )
- ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) )
- ( c1345 ) * tmp2 * u[i][j][k][4] ) );
d[i][j][4][1] = dt * 2.0
* ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] );
d[i][j][4][2] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2]
+ ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2]
+ tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] );
d[i][j][4][3] = dt * 2.0
* ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3]
+ tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] );
d[i][j][4][4] = 1.0
+ dt * 2.0 * ( tx1 * c1345 * tmp1
+ ty1 * c1345 * tmp1
+ tz1 * c1345 * tmp1 )
+ dt * 2.0 * ( tx1 * dx5
+ ty1 * dy5
+ tz1 * dz5 );
/*--------------------------------------------------------------------
c   form the first block sub-diagonal (built from the i+1 neighbour)
--------------------------------------------------------------------*/
/* tmp1..tmp3 now refer to 1/rho powers at (i+1,j,k) */
tmp1 = 1.0 / u[i+1][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
a[i][j][0][0] = - dt * tx1 * dx1;
a[i][j][0][1] = dt * tx2;
a[i][j][0][2] = 0.0;
a[i][j][0][3] = 0.0;
a[i][j][0][4] = 0.0;
a[i][j][1][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 )
+ C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] );
a[i][j][1][1] = dt * tx2
* ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * ( r43 * c34 * tmp1 )
- dt * tx1 * dx2;
a[i][j][1][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2] * tmp1 ) );
a[i][j][1][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3] * tmp1 ) );
a[i][j][1][4] = dt * tx2 * C2 ;
a[i][j][2][0] = dt * tx2
* ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] );
a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 );
a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx3;
a[i][j][2][3] = 0.0;
a[i][j][2][4] = 0.0;
a[i][j][3][0] = dt * tx2
* ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 )
- dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] );
a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 );
a[i][j][3][2] = 0.0;
a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 )
- dt * tx1 * ( c34 * tmp1 )
- dt * tx1 * dx4;
a[i][j][3][4] = 0.0;
a[i][j][4][0] = dt * tx2
* ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1]
+ u[i+1][j][k][2] * u[i+1][j][k][2]
+ u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2
- C1 * ( u[i+1][j][k][4] * tmp1 ) )
* ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1
* ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) )
- c1345 * tmp2 * u[i+1][j][k][4] );
a[i][j][4][1] = dt * tx2
* ( C1 * ( u[i+1][j][k][4] * tmp1 )
- 0.50 * C2
* ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1]
+ u[i+1][j][k][2]*u[i+1][j][k][2]
+ u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) )
- dt * tx1
* ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1];
a[i][j][4][2] = dt * tx2
* ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2];
a[i][j][4][3] = dt * tx2
* ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 )
- dt * tx1
* ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3];
a[i][j][4][4] = dt * tx2
* ( C1 * ( u[i+1][j][k][1] * tmp1 ) )
- dt * tx1 * c1345 * tmp1
- dt * tx1 * dx5;
/*--------------------------------------------------------------------
c   form the second block sub-diagonal (built from the j+1 neighbour)
--------------------------------------------------------------------*/
/* tmp1..tmp3 now refer to 1/rho powers at (i,j+1,k) */
tmp1 = 1.0 / u[i][j+1][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
b[i][j][0][0] = - dt * ty1 * dy1;
b[i][j][0][1] = 0.0;
b[i][j][0][2] = dt * ty2;
b[i][j][0][3] = 0.0;
b[i][j][0][4] = 0.0;
b[i][j][1][0] = dt * ty2
* ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] );
b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy2;
b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 );
b[i][j][1][3] = 0.0;
b[i][j][1][4] = 0.0;
b[i][j][2][0] = dt * ty2
* ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 )
+ 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] )
* tmp2 ) )
- dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] );
b[i][j][2][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1] * tmp1 ) );
b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * ( r43 * c34 * tmp1 )
- dt * ty1 * dy3;
b[i][j][2][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][3] * tmp1 ) );
b[i][j][2][4] = dt * ty2 * C2;
b[i][j][3][0] = dt * ty2
* ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] );
b[i][j][3][1] = 0.0;
b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 );
b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 )
- dt * ty1 * ( c34 * tmp1 )
- dt * ty1 * dy4;
b[i][j][3][4] = 0.0;
b[i][j][4][0] = dt * ty2
* ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1]
+ u[i][j+1][k][2] * u[i][j+1][k][2]
+ u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2
- C1 * ( u[i][j+1][k][4] * tmp1 ) )
* ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1
* ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) )
- ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) )
- ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) )
- c1345*tmp2*u[i][j+1][k][4] );
b[i][j][4][1] = dt * ty2
* ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 )
- dt * ty1
* ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1];
b[i][j][4][2] = dt * ty2
* ( C1 * ( u[i][j+1][k][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j+1][k][1]*u[i][j+1][k][1]
+ 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2]
+ u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) )
- dt * ty1
* ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2];
b[i][j][4][3] = dt * ty2
* ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 )
- dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3];
b[i][j][4][4] = dt * ty2
* ( C1 * ( u[i][j+1][k][2] * tmp1 ) )
- dt * ty1 * c1345 * tmp1
- dt * ty1 * dy5;
/*--------------------------------------------------------------------
c   form the third block sub-diagonal (built from the k+1 neighbour)
--------------------------------------------------------------------*/
/* tmp1..tmp3 now refer to 1/rho powers at (i,j,k+1) */
tmp1 = 1.0 / u[i][j][k+1][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
c[i][j][0][0] = - dt * tz1 * dz1;
c[i][j][0][1] = 0.0;
c[i][j][0][2] = 0.0;
c[i][j][0][3] = dt * tz2;
c[i][j][0][4] = 0.0;
c[i][j][1][0] = dt * tz2
* ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] );
c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * c34 * tmp1
- dt * tz1 * dz2 ;
c[i][j][1][2] = 0.0;
c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 );
c[i][j][1][4] = 0.0;
c[i][j][2][0] = dt * tz2
* ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] );
c[i][j][2][1] = 0.0;
c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( c34 * tmp1 )
- dt * tz1 * dz3;
c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 );
c[i][j][2][4] = 0.0;
c[i][j][3][0] = dt * tz2
* ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 )
+ 0.50 * C2
* ( ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] );
c[i][j][3][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1] * tmp1 ) );
c[i][j][3][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2] * tmp1 ) );
c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 )
* ( u[i][j][k+1][3] * tmp1 )
- dt * tz1 * ( r43 * c34 * tmp1 )
- dt * tz1 * dz4;
c[i][j][3][4] = dt * tz2 * C2;
c[i][j][4][0] = dt * tz2
* ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1]
+ u[i][j][k+1][2] * u[i][j][k+1][2]
+ u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2
- C1 * ( u[i][j][k+1][4] * tmp1 ) )
* ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1
* ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) )
- ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) )
- ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) )
- c1345 * tmp2 * u[i][j][k+1][4] );
c[i][j][4][1] = dt * tz2
* ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1];
c[i][j][4][2] = dt * tz2
* ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 )
- dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2];
c[i][j][4][3] = dt * tz2
* ( C1 * ( u[i][j][k+1][4] * tmp1 )
- 0.50 * C2
* ( ( u[i][j][k+1][1]*u[i][j][k+1][1]
+ u[i][j][k+1][2]*u[i][j][k+1][2]
+ 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) )
- dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3];
c[i][j][4][4] = dt * tz2
* ( C1 * ( u[i][j][k+1][3] * tmp1 ) )
- dt * tz1 * c1345 * tmp1
- dt * tz1 * dz5;
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the L2-norm of vector v:
c   sum[m] = sqrt( (1/N) * SUM v[i][j][k][m]^2 ),  N = (nx0-2)*(ny0-2)*(nz0-2)
c taken over the interior points i=ist..iend, j=jst..jend, k=1..nz0-2.
--------------------------------------------------------------------*/
static void l2norm (int nx0, int ny0, int nz0,
		    int ist, int iend,
		    int jst, int jend,
/*--------------------------------------------------------------------
c To improve cache performance, second two dimensions padded by 1
c for even number sizes only. Only needed in v.
--------------------------------------------------------------------*/
		    double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
		    double sum[5]) {
#pragma omp parallel
  {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
    int i, j, k, m;
    /* per-thread partial sums, one slot per solution component */
    double part[5] = {0.0, 0.0, 0.0, 0.0, 0.0};

/* one thread clears the shared accumulators; the implicit barrier at
   the end of "single" makes them visible before any thread adds in */
#pragma omp single
    for (m = 0; m < 5; m++) {
      sum[m] = 0.0;
    }

/* each thread sums the squares over its share of the interior points;
   nowait is safe because the result is only combined in the critical
   section below, followed by an explicit barrier */
#pragma omp for nowait
    for (i = ist; i <= iend; i++) {
      for (j = jst; j <= jend; j++) {
	for (k = 1; k <= nz0-2; k++) {
	  for (m = 0; m < 5; m++) {
	    part[m] += v[i][j][k][m] * v[i][j][k][m];
	  }
	}
      }
    }

/* fold the per-thread partials into the shared result one thread at a time */
#pragma omp critical
    {
      for (m = 0; m < 5; m++) {
	sum[m] += part[m];
      }
    }
#pragma omp barrier

/* normalize by the interior-point count and take the root */
#pragma omp single
    for (m = 0; m < 5; m++) {
      sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c pintgr: compute the surface integral "frc" used for verification.
c Three face integrals are formed from the quantity
c     phi = C2 * ( u[4] - 0.5*(u[1]^2 + u[2]^2 + u[3]^2)/u[0] )
c evaluated on opposite faces of the sub-domain, each integrated with
c a 2-D trapezoidal rule, and combined as frc = 0.25*(frc1+frc2+frc3).
c NOTE(review): the iglob/jglob bookkeeping and the ibeg/ifin/jbeg/jfin
c clipping below look inherited from the MPI-decomposed version (here
c iglob == i and jglob == j throughout) — confirm before simplifying.
--------------------------------------------------------------------*/
static void pintgr(void) {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k;
  int ibeg, ifin, ifin1;
  int jbeg, jfin, jfin1;
  int iglob, iglob1, iglob2;
  int jglob, jglob1, jglob2;
  double phi1[ISIZ2+2][ISIZ3+2];	/* phi1(0:isiz2+1,0:isiz3+1) */
  double phi2[ISIZ2+2][ISIZ3+2];	/* phi2(0:isiz2+1,0:isiz3+1) */
  double frc1, frc2, frc3;
/*--------------------------------------------------------------------
c set up the sub-domains for integeration in each processor
c (clips [ibeg,ifin] x [jbeg,jfin] to the integral bounds ii1..ii2,
c ji1..ji2 that were fixed in setcoeff)
--------------------------------------------------------------------*/
  ibeg = nx;
  ifin = 0;
  iglob1 = -1;
  iglob2 = nx-1;
  if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
  if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
  jbeg = ny;
  jfin = -1;
  jglob1 = 0;
  jglob2 = ny-1;
  if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
  if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
  /* ifin1/jfin1 are the last cell (not node) indices for the
     trapezoidal sums, hence one less at the upper integral bound */
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) ifin1 = ifin -1;
  if (jfin1 == ji2) jfin1 = jfin -1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }
/*--------------------------------------------------------------------
c frc1: integral over the two k = const faces (k = ki1 and k = ki2)
--------------------------------------------------------------------*/
  for (i = ibeg; i <= ifin; i++) {
    iglob = i;
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      k = ki1;
      phi1[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
      k = ki2;
      phi2[i][j] = C2*(  u[i][j][k][4]
			 - 0.50 * (  pow2(u[i][j][k][1])
				     + pow2(u[i][j][k][2])
				     + pow2(u[i][j][k][3]) )
			 / u[i][j][k][0] );
    }
  }
  /* 2-D trapezoidal rule: each cell contributes the sum of its four
     corner values on both faces */
  frc1 = 0.0;
  for (i = ibeg; i <= ifin1; i++) {
    for (j = jbeg; j <= jfin1; j++) {
      frc1 = frc1 + (  phi1[i][j]
		       + phi1[i+1][j]
		       + phi1[i][j+1]
		       + phi1[i+1][j+1]
		       + phi2[i][j]
		       + phi2[i+1][j]
		       + phi2[i][j+1]
		       + phi2[i+1][j+1] );
    }
  }
  frc1 = dxi * deta * frc1;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }
/*--------------------------------------------------------------------
c frc2: integral over the two j = const faces (j = ji1 and j = ji2)
--------------------------------------------------------------------*/
  jglob = jbeg;
  if (jglob == ji1) {
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
	phi1[i][k] = C2*(  u[i][jbeg][k][4]
			   - 0.50 * (  pow2(u[i][jbeg][k][1])
				       + pow2(u[i][jbeg][k][2])
				       + pow2(u[i][jbeg][k][3]) )
			   / u[i][jbeg][k][0] );
      }
    }
  }
  jglob = jfin;
  if (jglob == ji2) {
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
	phi2[i][k] = C2*(  u[i][jfin][k][4]
			   - 0.50 * (  pow2(u[i][jfin][k][1])
				       + pow2(u[i][jfin][k][2])
				       + pow2(u[i][jfin][k][3]) )
			   / u[i][jfin][k][0] );
      }
    }
  }
  frc2 = 0.0;
  for (i = ibeg; i <= ifin1; i++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc2 = frc2 + (  phi1[i][k]
		       + phi1[i+1][k]
		       + phi1[i][k+1]
		       + phi1[i+1][k+1]
		       + phi2[i][k]
		       + phi2[i+1][k]
		       + phi2[i][k+1]
		       + phi2[i+1][k+1] );
    }
  }
  frc2 = dxi * dzeta * frc2;
/*--------------------------------------------------------------------
c initialize
--------------------------------------------------------------------*/
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }
/*--------------------------------------------------------------------
c frc3: integral over the two i = const faces (i = ii1 and i = ii2);
c note phi1/phi2 are indexed [j][k] here
--------------------------------------------------------------------*/
  iglob = ibeg;
  if (iglob == ii1) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi1[j][k] = C2*(  u[ibeg][j][k][4]
			   - 0.50 * (  pow2(u[ibeg][j][k][1])
				       + pow2(u[ibeg][j][k][2])
				       + pow2(u[ibeg][j][k][3]) )
			   / u[ibeg][j][k][0] );
      }
    }
  }
  iglob = ifin;
  if (iglob == ii2) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
	phi2[j][k] = C2*(  u[ifin][j][k][4]
			   - 0.50 * (  pow2(u[ifin][j][k][1])
				       + pow2(u[ifin][j][k][2])
				       + pow2(u[ifin][j][k][3]) )
			   / u[ifin][j][k][0] );
      }
    }
  }
  frc3 = 0.0;
  for (j = jbeg; j <= jfin1; j++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc3 = frc3 + (  phi1[j][k]
		       + phi1[j+1][k]
		       + phi1[j][k+1]
		       + phi1[j+1][k+1]
		       + phi2[j][k]
		       + phi2[j+1][k]
		       + phi2[j][k+1]
		       + phi2[j+1][k+1] );
    }
  }
  frc3 = deta * dzeta * frc3;
  /* average of the three face-pair integrals */
  frc = 0.25 * ( frc1 + frc2 + frc3 );
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c skip_line: consume the remainder of the current line of fp, up to
c and including the newline.  Unlike the historic
c     while(fgetc(fp) != '\n');
c idiom, this also stops at EOF, so a truncated or otherwise malformed
c inputlu.data can no longer hang the benchmark in an infinite loop.
--------------------------------------------------------------------*/
static void skip_line(FILE *fp) {
  int ch;
  while ((ch = fgetc(fp)) != '\n' && ch != EOF) {
    /* discard */
  }
}

/*--------------------------------------------------------------------
c read_input: read the run parameters from inputlu.data when that file
c exists, otherwise use the compiled-in defaults, then validate the
c requested problem size against the compiled array bounds.
c
c if input file does not exist, it uses defaults
c    ipr = 1 for detailed progress output
c    inorm = how often the norm is printed (once every inorm iterations)
c    itmax = number of pseudo time steps
c    dt = time step
c    omega 1 over-relaxation factor for SSOR
c    tolrsd = steady state residual tolerance levels
c    nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
static void read_input(void) {
  FILE *fp;
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	 " - LU Benchmark\n\n");
  fp = fopen("inputlu.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputlu.data\n");
    /* Each datum in the file is preceded by two comment lines and the
       value itself is followed by the rest of its own line.  The
       fscanf return values are deliberately not checked: on a partial
       read the historic behavior (garbage/old values, no fallback to
       defaults) is preserved — only the EOF hang is fixed. */
    skip_line(fp); skip_line(fp);
    fscanf(fp, "%d%d", &ipr, &inorm);
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    fscanf(fp, "%d", &itmax);
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    fscanf(fp, "%lf", &dt);
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    fscanf(fp, "%lf", &omega);
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    fscanf(fp, "%lf%lf%lf%lf%lf",
	   &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
    skip_line(fp);
    skip_line(fp); skip_line(fp);
    fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
    skip_line(fp);
    fclose(fp);
  } else {
    ipr = IPR_DEFAULT;
    inorm = INORM_DEFAULT;
    itmax = ITMAX_DEFAULT;
    dt = DT_DEFAULT;
    omega = OMEGA_DEFAULT;
    tolrsd[0] = TOLRSD1_DEF;
    tolrsd[1] = TOLRSD2_DEF;
    tolrsd[2] = TOLRSD3_DEF;
    tolrsd[3] = TOLRSD4_DEF;
    tolrsd[4] = TOLRSD5_DEF;
    nx0 = ISIZ1;
    ny0 = ISIZ2;
    nz0 = ISIZ3;
  }
/*--------------------------------------------------------------------
c check problem size
c NOTE(review): the test is "< 4" while the message says "AT LEAST 5";
c this mismatch is inherited from the reference implementation and is
c kept so output stays comparable — confirm before changing either.
--------------------------------------------------------------------*/
  if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
    printf(" PROBLEM SIZE IS TOO SMALL - \n"
	   " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
    exit(1);
  }
  if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
    printf(" PROBLEM SIZE IS TOO LARGE - \n"
	   " NX, NY AND NZ SHOULD BE EQUAL TO \n"
	   " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
    exit(1);
  }
  printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
  printf(" Iterations: %3d\n", itmax);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c rhs: compute the right hand side (steady-state residual) rsd:
c   rsd = -frct  -  convective flux differences
c                +  viscous flux differences
c                -  fourth-order artificial dissipation,
c accumulated direction by direction (xi, then eta, then zeta).
--------------------------------------------------------------------*/
static void rhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
c compute the right hand sides
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int L1, L2;
  int ist1, iend1;
  int jst1, jend1;
  double q;
  double u21, u31, u41;
  double tmp;
  double u21i, u31i, u41i, u51i;
  double u21j, u31j, u41j, u51j;
  double u21k, u31k, u41k, u51k;
  double u21im1, u31im1, u41im1, u51im1;
  double u21jm1, u31jm1, u41jm1, u51jm1;
  double u21km1, u31km1, u41km1, u51km1;
/* start from the negated forcing term everywhere */
#pragma omp for
  for (i = 0; i <= nx-1; i++) {
    for (j = 0; j <= ny-1; j++) {
      for (k = 0; k <= nz-1; k++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = - frct[i][j][k][m];
	}
      }
    }
  }
/*--------------------------------------------------------------------
c xi-direction flux differences
--------------------------------------------------------------------*/
  L1 = 0;
  L2 = nx-1;
/* convective (Euler) fluxes in xi; u21 is the x-velocity, q the
   kinetic-energy term */
#pragma omp for
  for (i = L1; i <= L2; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 1; k <= nz - 2; k++) {
	flux[i][j][k][0] = u[i][j][k][1];
	u21 = u[i][j][k][1] / u[i][j][k][0];
	q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
		      + u[i][j][k][2] * u[i][j][k][2]
		      + u[i][j][k][3] * u[i][j][k][3] )
	  / u[i][j][k][0];
	flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 *
	  ( u[i][j][k][4] - q );
	flux[i][j][k][2] = u[i][j][k][2] * u21;
	flux[i][j][k][3] = u[i][j][k][3] * u21;
	flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
      }
    }
  }
/* central convective difference, then viscous fluxes (flux is reused
   as scratch), then viscous + second-difference terms, then the
   fourth-order dissipation — all within one (j,k) plane */
#pragma omp for
  for (j = jst; j <= jend; j++) {
    for (k = 1; k <= nz - 2; k++) {
      for (i = ist; i <= iend; i++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] =  rsd[i][j][k][m]
	    - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
	}
      }
      L2 = nx-1;
      for (i = ist; i <= L2; i++) {
	tmp = 1.0 / u[i][j][k][0];
	u21i = tmp * u[i][j][k][1];
	u31i = tmp * u[i][j][k][2];
	u41i = tmp * u[i][j][k][3];
	u51i = tmp * u[i][j][k][4];
	tmp = 1.0 / u[i-1][j][k][0];
	u21im1 = tmp * u[i-1][j][k][1];
	u31im1 = tmp * u[i-1][j][k][2];
	u41im1 = tmp * u[i-1][j][k][3];
	u51im1 = tmp * u[i-1][j][k][4];
	flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
	flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
	flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
	flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	  * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
		    - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
	  + (1.0/6.0)
	  * tx3 * ( pow2(u21i) - pow2(u21im1) )
	  + C1 * C5 * tx3 * ( u51i - u51im1 );
      }
      for (i = ist; i <= iend; i++) {
	rsd[i][j][k][0] = rsd[i][j][k][0]
	  + dx1 * tx1 * (            u[i-1][j][k][0]
				     - 2.0 * u[i][j][k][0]
				     +           u[i+1][j][k][0] );
	rsd[i][j][k][1] = rsd[i][j][k][1]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
	  + dx2 * tx1 * (            u[i-1][j][k][1]
				     - 2.0 * u[i][j][k][1]
				     +           u[i+1][j][k][1] );
	rsd[i][j][k][2] = rsd[i][j][k][2]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
	  + dx3 * tx1 * (            u[i-1][j][k][2]
				     - 2.0 * u[i][j][k][2]
				     +           u[i+1][j][k][2] );
	rsd[i][j][k][3] = rsd[i][j][k][3]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
	  + dx4 * tx1 * (            u[i-1][j][k][3]
				     - 2.0 * u[i][j][k][3]
				     +           u[i+1][j][k][3] );
	rsd[i][j][k][4] = rsd[i][j][k][4]
	  + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
	  + dx5 * tx1 * (            u[i-1][j][k][4]
				     - 2.0 * u[i][j][k][4]
				     +           u[i+1][j][k][4] );
      }
/*--------------------------------------------------------------------
c Fourth-order dissipation
c (one-sided stencils at i=1,2 and i=nx-3,nx-2; full 5-point stencil
c in the interior i=3..nx-4)
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	rsd[1][j][k][m] = rsd[1][j][k][m]
	  - dssp * ( + 5.0 * u[1][j][k][m]
		     - 4.0 * u[2][j][k][m]
		     +       u[3][j][k][m] );
	rsd[2][j][k][m] = rsd[2][j][k][m]
	  - dssp * ( - 4.0 * u[1][j][k][m]
		     + 6.0 * u[2][j][k][m]
		     - 4.0 * u[3][j][k][m]
		     +       u[4][j][k][m] );
      }
      ist1 = 3;
      iend1 = nx - 4;
      for (i = ist1; i <= iend1; i++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - dssp * (         u[i-2][j][k][m]
			       - 4.0 * u[i-1][j][k][m]
			       + 6.0 * u[i][j][k][m]
			       - 4.0 * u[i+1][j][k][m]
			       +         u[i+2][j][k][m] );
	}
      }
      for (m = 0; m < 5; m++) {
	rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m]
	  - dssp * (          u[nx-5][j][k][m]
			      - 4.0 * u[nx-4][j][k][m]
			      + 6.0 * u[nx-3][j][k][m]
			      - 4.0 * u[nx-2][j][k][m] );
	rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m]
	  - dssp * (          u[nx-4][j][k][m]
			      - 4.0 * u[nx-3][j][k][m]
			      + 5.0 * u[nx-2][j][k][m] );
      }
    }
  }
/*--------------------------------------------------------------------
c eta-direction flux differences
c (same structure as the xi direction, with u31 the y-velocity)
--------------------------------------------------------------------*/
  L1 = 0;
  L2 = ny-1;
#pragma omp for
  for (i = ist; i <= iend; i++) {
    for (j = L1; j <= L2; j++) {
      for (k = 1; k <= nz - 2; k++) {
	flux[i][j][k][0] = u[i][j][k][2];
	u31 = u[i][j][k][2] / u[i][j][k][0];
	q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
		      + u[i][j][k][2] * u[i][j][k][2]
		      + u[i][j][k][3] * u[i][j][k][3] )
	  / u[i][j][k][0];
	flux[i][j][k][1] = u[i][j][k][1] * u31;
	flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q);
	flux[i][j][k][3] = u[i][j][k][3] * u31;
	flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31;
      }
    }
  }
#pragma omp for
  for (i = ist; i <= iend; i++) {
    for (k = 1; k <= nz - 2; k++) {
      for (j = jst; j <= jend; j++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] =  rsd[i][j][k][m]
	    - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] );
	}
      }
      L2 = ny-1;
      for (j = jst; j <= L2; j++) {
	tmp = 1.0 / u[i][j][k][0];
	u21j = tmp * u[i][j][k][1];
	u31j = tmp * u[i][j][k][2];
	u41j = tmp * u[i][j][k][3];
	u51j = tmp * u[i][j][k][4];
	tmp = 1.0 / u[i][j-1][k][0];
	u21jm1 = tmp * u[i][j-1][k][1];
	u31jm1 = tmp * u[i][j-1][k][2];
	u41jm1 = tmp * u[i][j-1][k][3];
	u51jm1 = tmp * u[i][j-1][k][4];
	flux[i][j][k][1] = ty3 * ( u21j - u21jm1 );
	flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1);
	flux[i][j][k][3] = ty3 * ( u41j - u41jm1 );
	flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	  * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) )
		    - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) )
	  + (1.0/6.0)
	  * ty3 * ( pow2(u31j) - pow2(u31jm1) )
	  + C1 * C5 * ty3 * ( u51j - u51jm1 );
      }
      for (j = jst; j <= jend; j++) {
	rsd[i][j][k][0] = rsd[i][j][k][0]
	  + dy1 * ty1 * (            u[i][j-1][k][0]
				     - 2.0 * u[i][j][k][0]
				     +           u[i][j+1][k][0] );
	rsd[i][j][k][1] = rsd[i][j][k][1]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] )
	  + dy2 * ty1 * (            u[i][j-1][k][1]
				     - 2.0 * u[i][j][k][1]
				     +           u[i][j+1][k][1] );
	rsd[i][j][k][2] = rsd[i][j][k][2]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] )
	  + dy3 * ty1 * (            u[i][j-1][k][2]
				     - 2.0 * u[i][j][k][2]
				     +           u[i][j+1][k][2] );
	rsd[i][j][k][3] = rsd[i][j][k][3]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] )
	  + dy4 * ty1 * (            u[i][j-1][k][3]
				     - 2.0 * u[i][j][k][3]
				     +           u[i][j+1][k][3] );
	rsd[i][j][k][4] = rsd[i][j][k][4]
	  + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] )
	  + dy5 * ty1 * (            u[i][j-1][k][4]
				     - 2.0 * u[i][j][k][4]
				     +           u[i][j+1][k][4] );
      }
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	rsd[i][1][k][m] = rsd[i][1][k][m]
	  - dssp * ( + 5.0 * u[i][1][k][m]
		     - 4.0 * u[i][2][k][m]
		     +       u[i][3][k][m] );
	rsd[i][2][k][m] = rsd[i][2][k][m]
	  - dssp * ( - 4.0 * u[i][1][k][m]
		     + 6.0 * u[i][2][k][m]
		     - 4.0 * u[i][3][k][m]
		     +       u[i][4][k][m] );
      }
      jst1 = 3;
      jend1 = ny - 4;
      for (j = jst1; j <= jend1; j++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - dssp * (         u[i][j-2][k][m]
			       - 4.0 * u[i][j-1][k][m]
			       + 6.0 * u[i][j][k][m]
			       - 4.0 * u[i][j+1][k][m]
			       +         u[i][j+2][k][m] );
	}
      }
      for (m = 0; m < 5; m++) {
	rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m]
	  - dssp * (          u[i][ny-5][k][m]
			      - 4.0 * u[i][ny-4][k][m]
			      + 6.0 * u[i][ny-3][k][m]
			      - 4.0 * u[i][ny-2][k][m] );
	rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m]
	  - dssp * (          u[i][ny-4][k][m]
			      - 4.0 * u[i][ny-3][k][m]
			      + 5.0 * u[i][ny-2][k][m] );
      }
    }
  }
/*--------------------------------------------------------------------
c zeta-direction flux differences
c (same structure again, with u41 the z-velocity; here the whole k
c line is processed inside one (i,j) iteration)
--------------------------------------------------------------------*/
#pragma omp for
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 0; k <= nz-1; k++) {
	flux[i][j][k][0] = u[i][j][k][3];
	u41 = u[i][j][k][3] / u[i][j][k][0];
	q = 0.50 * (  u[i][j][k][1] * u[i][j][k][1]
		      + u[i][j][k][2] * u[i][j][k][2]
		      + u[i][j][k][3] * u[i][j][k][3] )
	  / u[i][j][k][0];
	flux[i][j][k][1] = u[i][j][k][1] * u41;
	flux[i][j][k][2] = u[i][j][k][2] * u41;
	flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q);
	flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41;
      }
      for (k = 1; k <= nz - 2; k++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] =  rsd[i][j][k][m]
	    - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] );
	}
      }
      for (k = 1; k <= nz-1; k++) {
	tmp = 1.0 / u[i][j][k][0];
	u21k = tmp * u[i][j][k][1];
	u31k = tmp * u[i][j][k][2];
	u41k = tmp * u[i][j][k][3];
	u51k = tmp * u[i][j][k][4];
	tmp = 1.0 / u[i][j][k-1][0];
	u21km1 = tmp * u[i][j][k-1][1];
	u31km1 = tmp * u[i][j][k-1][2];
	u41km1 = tmp * u[i][j][k-1][3];
	u51km1 = tmp * u[i][j][k-1][4];
	flux[i][j][k][1] = tz3 * ( u21k - u21km1 );
	flux[i][j][k][2] = tz3 * ( u31k - u31km1 );
	flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1);
	flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
	  * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) )
		    - ( pow2(u21km1) + pow2(u31km1) + pow2(u41km1) ) )
	  + (1.0/6.0)
	  * tz3 * ( pow2(u41k) - pow2(u41km1) )
	  + C1 * C5 * tz3 * ( u51k - u51km1 );
      }
      for (k = 1; k <= nz - 2; k++) {
	rsd[i][j][k][0] = rsd[i][j][k][0]
	  + dz1 * tz1 * (            u[i][j][k-1][0]
				     - 2.0 * u[i][j][k][0]
				     +           u[i][j][k+1][0] );
	rsd[i][j][k][1] = rsd[i][j][k][1]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] )
	  + dz2 * tz1 * (            u[i][j][k-1][1]
				     - 2.0 * u[i][j][k][1]
				     +           u[i][j][k+1][1] );
	rsd[i][j][k][2] = rsd[i][j][k][2]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] )
	  + dz3 * tz1 * (            u[i][j][k-1][2]
				     - 2.0 * u[i][j][k][2]
				     +           u[i][j][k+1][2] );
	rsd[i][j][k][3] = rsd[i][j][k][3]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] )
	  + dz4 * tz1 * (            u[i][j][k-1][3]
				     - 2.0 * u[i][j][k][3]
				     +           u[i][j][k+1][3] );
	rsd[i][j][k][4] = rsd[i][j][k][4]
	  + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] )
	  + dz5 * tz1 * (            u[i][j][k-1][4]
				     - 2.0 * u[i][j][k][4]
				     +           u[i][j][k+1][4] );
      }
/*--------------------------------------------------------------------
c fourth-order dissipation
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	rsd[i][j][1][m] = rsd[i][j][1][m]
	  - dssp * ( + 5.0 * u[i][j][1][m]
		     - 4.0 * u[i][j][2][m]
		     +       u[i][j][3][m] );
	rsd[i][j][2][m] = rsd[i][j][2][m]
	  - dssp * ( - 4.0 * u[i][j][1][m]
		     + 6.0 * u[i][j][2][m]
		     - 4.0 * u[i][j][3][m]
		     +       u[i][j][4][m] );
      }
      for (k = 3; k <= nz - 4; k++) {
	for (m = 0; m < 5; m++) {
	  rsd[i][j][k][m] = rsd[i][j][k][m]
	    - dssp * (         u[i][j][k-2][m]
			       - 4.0 * u[i][j][k-1][m]
			       + 6.0 * u[i][j][k][m]
			       - 4.0 * u[i][j][k+1][m]
			       +         u[i][j][k+2][m] );
	}
      }
      for (m = 0; m < 5; m++) {
	rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m]
	  - dssp * (          u[i][j][nz-5][m]
			      - 4.0 * u[i][j][nz-4][m]
			      + 6.0 * u[i][j][nz-3][m]
			      - 4.0 * u[i][j][nz-2][m] );
	rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m]
	  - dssp * (          u[i][j][nz-4][m]
			      - 4.0 * u[i][j][nz-3][m]
			      + 5.0 * u[i][j][nz-2][m] );
      }
    }
  }
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c set the boundary values of dependent variables on all six faces of
c the domain from the exact solution (in this shared-memory version
c the global index equals the local index, so i and j are passed to
c exact() directly)
--------------------------------------------------------------------*/
static void setbv(void) {
#pragma omp parallel
  {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
    int i, j, k;

/*--------------------------------------------------------------------
c set the dependent variable values along the top and bottom faces
--------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < nx; i++) {
      for (j = 0; j < ny; j++) {
	exact( i, j, 0,    &u[i][j][0][0] );
	exact( i, j, nz-1, &u[i][j][nz-1][0] );
      }
    }

/*--------------------------------------------------------------------
c set the dependent variable values along north and south faces
--------------------------------------------------------------------*/
#pragma omp for
    for (i = 0; i < nx; i++) {
      for (k = 0; k < nz; k++) {
	exact( i, 0, k, &u[i][0][k][0] );
      }
    }
#pragma omp for
    for (i = 0; i < nx; i++) {
      for (k = 0; k < nz; k++) {
	exact( i, ny0-1, k, &u[i][ny-1][k][0] );
      }
    }

/*--------------------------------------------------------------------
c set the dependent variable values along east and west faces
--------------------------------------------------------------------*/
#pragma omp for
    for (j = 0; j < ny; j++) {
      for (k = 0; k < nz; k++) {
	exact( 0, j, k, &u[0][j][k][0] );
      }
    }
#pragma omp for
    for (j = 0; j < ny; j++) {
      for (k = 0; k < nz; k++) {
	exact( nx0-1, j, k, &u[nx-1][j][k][0] );
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c set up coefficients: mesh spacings, difference-operator scalings,
c surface-integral bounds, diffusion/dissipation constants, and the
c polynomial coefficients ce[][] of the five exact-solution pdes
--------------------------------------------------------------------*/
static void setcoeff(void) {
  int m, n;
  /* coefficients of the exact solution, one row per pde, 13 terms
     each (rows: first..fifth pde) */
  static const double ce_init[5][13] = {
    { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 5.0e-01, 2.0e-02, 1.0e-02,
      3.0e-02, 5.0e-01, 4.0e-01, 3.0e-01 },
    { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 1.0e-02, 3.0e-02,
      2.0e-02, 4.0e-01, 3.0e-01, 5.0e-01 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 4.0e-02, 3.0e-02,
      5.0e-02, 3.0e-01, 5.0e-01, 4.0e-01 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 3.0e-02, 5.0e-02,
      4.0e-02, 2.0e-01, 1.0e-01, 3.0e-01 },
    { 5.0, 4.0, 3.0, 2.0, 1.0e-01, 4.0e-01, 3.0e-01, 5.0e-02,
      4.0e-02, 3.0e-02, 1.0e-01, 3.0e-01, 2.0e-01 }
  };

  /* computational-space mesh widths */
  dxi   = 1.0 / ( nx0 - 1 );
  deta  = 1.0 / ( ny0 - 1 );
  dzeta = 1.0 / ( nz0 - 1 );

  /* difference-operator scalings derived from the mesh widths:
     t?1 = 1/h^2, t?2 = 1/(2h), t?3 = 1/h in each direction */
  tx1 = 1.0 / ( dxi * dxi );
  tx2 = 1.0 / ( 2.0 * dxi );
  tx3 = 1.0 / dxi;
  ty1 = 1.0 / ( deta * deta );
  ty2 = 1.0 / ( 2.0 * deta );
  ty3 = 1.0 / deta;
  tz1 = 1.0 / ( dzeta * dzeta );
  tz2 = 1.0 / ( 2.0 * dzeta );
  tz3 = 1.0 / dzeta;

  /* index bounds of the surface-integral sub-domain used by pintgr */
  ii1 = 1;
  ii2 = nx0 - 2;
  ji1 = 1;
  ji2 = ny0 - 3;
  ki1 = 2;
  ki2 = nz0 - 2;

/*--------------------------------------------------------------------
c diffusion coefficients (identical for all five equations in each
c direction)
--------------------------------------------------------------------*/
  dx1 = dx2 = dx3 = dx4 = dx5 = 0.75;
  dy1 = dy2 = dy3 = dy4 = dy5 = 0.75;
  dz1 = dz2 = dz3 = dz4 = dz5 = 1.00;

/*--------------------------------------------------------------------
c fourth difference dissipation
--------------------------------------------------------------------*/
  dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0;

/*--------------------------------------------------------------------
c copy the exact-solution coefficients into the global table
--------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    for (n = 0; n < 13; n++) {
      ce[m][n] = ce_init[m][n];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c set the initial values of independent variables based on tri-linear
c interpolation of boundary values in the computational space; only
c interior points are touched (the boundary planes are set by setbv)
--------------------------------------------------------------------*/
static void setiv(void) {
#pragma omp parallel
  {
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
    int i, j, k, m;
    double xi, eta, zeta;
    double pxi, peta, pzeta;
    /* exact boundary values on the six faces surrounding (i,j,k) */
    double ue_1jk[5], ue_nx0jk[5], ue_i1k[5],
	   ue_iny0k[5], ue_ij1[5], ue_ijnz[5];
#pragma omp for
    for (j = 0; j < ny; j++) {
      /* skip the north/south boundary planes */
      if (j == 0 || j == ny0-1) continue;
      eta = ( (double) j ) / (ny0-1);
      for (k = 1; k < nz - 1; k++) {
	zeta = ((double)k) / (nz-1);
	for (i = 0; i < nx; i++) {
	  /* skip the east/west boundary planes */
	  if (i == 0 || i == nx0-1) continue;
	  xi = ( (double) i ) / (nx0-1);
	  /* exact solution on the two faces in each direction */
	  exact (0,     j, k, ue_1jk);
	  exact (nx0-1, j, k, ue_nx0jk);
	  exact (i, 0,     k, ue_i1k);
	  exact (i, ny0-1, k, ue_iny0k);
	  exact (i, j, 0,    ue_ij1);
	  exact (i, j, nz-1, ue_ijnz);
	  for (m = 0; m < 5; m++) {
	    /* linear profiles between opposite faces ... */
	    pxi   = ( 1.0 - xi )   * ue_1jk[m]  + xi   * ue_nx0jk[m];
	    peta  = ( 1.0 - eta )  * ue_i1k[m]  + eta  * ue_iny0k[m];
	    pzeta = ( 1.0 - zeta ) * ue_ij1[m]  + zeta * ue_ijnz[m];
	    /* ... combined by the tri-linear blending formula */
	    u[i][j][k][m] = pxi + peta + pzeta
	      - pxi * peta - peta * pzeta - pzeta * pxi
	      + pxi * peta * pzeta;
	  }
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c ssor: perform pseudo-time stepping SSOR iterations for the five
c nonlinear pdes.  Each step scales the residual by dt, sweeps the
c lower-triangular system forward in k (jacld/blts) and the
c upper-triangular system backward in k (jacu/buts), then updates u.
--------------------------------------------------------------------*/
static void ssor(void) {
/*--------------------------------------------------------------------
c to perform pseudo-time stepping SSOR iterations
c for five nonlinear pde s.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int istep;
  double tmp;			/* SSOR relaxation scaling 1/(omega*(2-omega)) */
  double delunm[5], tv[ISIZ1][ISIZ2][5];	/* tv: scratch for buts */
/*--------------------------------------------------------------------
c begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
  tmp = 1.0 / ( omega * ( 2.0 - omega ) ) ;
/*--------------------------------------------------------------------
c initialize a,b,c,d to zero (guarantees that page tables have been
c formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/
#pragma omp parallel private(i,j,k,m)
{
#pragma omp for
  for (i = 0; i < ISIZ1; i++) {
    for (j = 0; j < ISIZ2; j++) {
      for (k = 0; k < 5; k++) {
	for (m = 0; m < 5; m++) {
	  a[i][j][k][m] = 0.0;
	  b[i][j][k][m] = 0.0;
	  c[i][j][k][m] = 0.0;
	  d[i][j][k][m] = 0.0;
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
  rhs();
/*--------------------------------------------------------------------
c compute the L2 norms of newton iteration residuals
--------------------------------------------------------------------*/
  l2norm( nx0, ny0, nz0,
	  ist, iend, jst, jend,
	  rsd, rsdnm );
  timer_clear(1);
  timer_start(1);
/*--------------------------------------------------------------------
c the timestep loop
--------------------------------------------------------------------*/
  for (istep = 1; istep <= itmax; istep++) {
    /* progress line on the first, last, and every 20th step.
       NOTE(review): this "omp master" is outside any parallel region,
       so it is a no-op here — inherited from the reference code */
    if (istep%20 == 0 || istep == itmax || istep == 1) {
#pragma omp master
      printf(" Time step %4d\n", istep);
    }
    /* istep appears in the private list but is not referenced inside
       the region; i,j,k,m must be private for the worksharing loops */
#pragma omp parallel private(istep,i,j,k,m)
{
/*--------------------------------------------------------------------
c perform SSOR iteration: first scale the residual by the time step
--------------------------------------------------------------------*/
#pragma omp for
    for (i = ist; i <= iend; i++) {
      for (j = jst; j <= jend; j++) {
	for (k = 1; k <= nz - 2; k++) {
	  for (m = 0; m < 5; m++) {
	    rsd[i][j][k][m] = dt * rsd[i][j][k][m];
	  }
	}
      }
    }
    /* forward sweep in k: every thread iterates all k planes; the
       pipelining/synchronization is handled inside jacld/blts */
    for (k = 1; k <= nz - 2; k++) {
/*--------------------------------------------------------------------
c form the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
      jacld(k);
/*--------------------------------------------------------------------
c perform the lower triangular solution
--------------------------------------------------------------------*/
      blts(nx, ny, nz, k,
	   omega,
	   rsd,
	   a, b, c, d,
	   ist, iend, jst, jend,
	   nx0, ny0 );
    }
#pragma omp barrier
    /* backward sweep in k */
    for (k = nz - 2; k >= 1; k--) {
/*--------------------------------------------------------------------
c form the strictly upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
      jacu(k);
/*--------------------------------------------------------------------
c perform the upper triangular solution
--------------------------------------------------------------------*/
      buts(nx, ny, nz, k,
	   omega,
	   rsd, tv,
	   d, a, b, c,
	   ist, iend, jst, jend,
	   nx0, ny0 );
    }
#pragma omp barrier
/*--------------------------------------------------------------------
c update the variables: u += rsd / (omega*(2-omega))
--------------------------------------------------------------------*/
#pragma omp for
    for (i = ist; i <= iend; i++) {
      for (j = jst; j <= jend; j++) {
	for (k = 1; k <= nz-2; k++) {
	  for (m = 0; m < 5; m++) {
	    u[i][j][k][m] = u[i][j][k][m]
	      + tmp * rsd[i][j][k][m];
	  }
	}
      }
    }
} /* end parallel */
/*--------------------------------------------------------------------
c compute the max-norms of newton iteration corrections
--------------------------------------------------------------------*/
    if ( istep % inorm == 0 ) {
      l2norm( nx0, ny0, nz0,
	      ist, iend, jst, jend,
	      rsd, delunm );
    }
/*--------------------------------------------------------------------
c compute the steady-state residuals
--------------------------------------------------------------------*/
    rhs();
/*--------------------------------------------------------------------
c compute the max-norms of newton iteration residuals
--------------------------------------------------------------------*/
    if ( ( istep % inorm  == 0 ) ||
	 ( istep == itmax ) ) {
      l2norm( nx0, ny0, nz0,
	      ist, iend, jst, jend,
	      rsd, rsdnm );
    }
/*--------------------------------------------------------------------
c check the newton-iteration residuals against the tolerance levels
c NOTE(review): on convergence the program terminates via exit(1) —
c nonzero status and no timer report; inherited behavior, kept as is
--------------------------------------------------------------------*/
    if ( ( rsdnm[0] < tolrsd[0] ) &&
	 ( rsdnm[1] < tolrsd[1] ) &&
	 ( rsdnm[2] < tolrsd[2] ) &&
	 ( rsdnm[3] < tolrsd[3] ) &&
	 ( rsdnm[4] < tolrsd[4] ) ) {
      exit(1);
    }
  }
  timer_stop(1);
  maxtime= timer_read(1);
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(double xcr[5], double xce[5], double xci,
		   char *class, boolean *verified) {

/*--------------------------------------------------------------------
c  verification routine
c
c  Compares the computed RMS residual norms (xcr), solution-error norms
c  (xce) and surface integral (xci) against hard-coded reference values
c  for the known problem classes, selected from the global grid size
c  (nx0, ny0, nz0) and iteration count (itmax).
c  Outputs: *class = problem class letter ('U' if the grid is unknown),
c  *verified = TRUE iff every relative difference is within epsilon and
c  dt matches the reference time step.
--------------------------------------------------------------------*/

  double xcrref[5],xceref[5],xciref,
    xcrdif[5],xcedif[5],xcidif,
    epsilon, dtref;
  int m;

/*--------------------------------------------------------------------
c   tolerance level (relative difference allowed against references)
--------------------------------------------------------------------*/
  epsilon = 1.0e-08;

  /* Defaults: unknown class, optimistically verified; the reference
     arrays get placeholder 1.0 so the relative-difference loop below is
     always well-defined even for class 'U'. */
  *class = 'U';
  *verified = TRUE;
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }
  xciref = 1.0;

  /* Select the problem class from the grid dimensions and iteration
     count; dtref is only read later when *class != 'U', so every branch
     that sets a class also sets dtref. */
  if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {

    *class = 'S';
    dtref = 5.0e-1;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of residual, for the (12X12X12) grid,
c   after 50 time steps, with  DT = 5.0d-01
--------------------------------------------------------------------*/
    xcrref[0] = 1.6196343210976702e-02;
    xcrref[1] = 2.1976745164821318e-03;
    xcrref[2] = 1.5179927653399185e-03;
    xcrref[3] = 1.5029584435994323e-03;
    xcrref[4] = 3.4264073155896461e-02;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of solution error, for the (12X12X12) grid,
c   after 50 time steps, with  DT = 5.0d-01
--------------------------------------------------------------------*/
    xceref[0] = 6.4223319957960924e-04;
    xceref[1] = 8.4144342047347926e-05;
    xceref[2] = 5.8588269616485186e-05;
    xceref[3] = 5.8474222595157350e-05;
    xceref[4] = 1.3103347914111294e-03;

/*--------------------------------------------------------------------
c   Reference value of surface integral, for the (12X12X12) grid,
c   after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xciref = 7.8418928865937083;

  } else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {

    *class = 'W';   /* SPEC95fp size */
    dtref = 1.5e-3;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of residual, for the (33x33x33) grid,
c   after 300 time steps, with  DT = 1.5d-3
--------------------------------------------------------------------*/
    xcrref[0] = 0.1236511638192e+02;
    xcrref[1] = 0.1317228477799e+01;
    xcrref[2] = 0.2550120713095e+01;
    xcrref[3] = 0.2326187750252e+01;
    xcrref[4] = 0.2826799444189e+02;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of solution error, for the (33X33X33) grid,
--------------------------------------------------------------------*/
    xceref[0] = 0.4867877144216;
    xceref[1] = 0.5064652880982e-01;
    xceref[2] = 0.9281818101960e-01;
    xceref[3] = 0.8570126542733e-01;
    xceref[4] = 0.1084277417792e+01;

/*--------------------------------------------------------------------
c   Reference value of surface integral, for the (33X33X33) grid,
c   after 300 time steps, with  DT = 1.5d-3
--------------------------------------------------------------------*/
    xciref = 0.1161399311023e+02;

  } else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) {

    *class = 'A';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of residual, for the (64X64X64) grid,
c   after 250 time steps, with  DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 7.7902107606689367e+02;
    xcrref[1] = 6.3402765259692870e+01;
    xcrref[2] = 1.9499249727292479e+02;
    xcrref[3] = 1.7845301160418537e+02;
    xcrref[4] = 1.8384760349464247e+03;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of solution error, for the (64X64X64) grid,
c   after 250 time steps, with  DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 2.9964085685471943e+01;
    xceref[1] = 2.8194576365003349;
    xceref[2] = 7.3473412698774742;
    xceref[3] = 6.7139225687777051;
    xceref[4] = 7.0715315688392578e+01;

/*--------------------------------------------------------------------
c   Reference value of surface integral, for the (64X64X64) grid,
c   after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 2.6030925604886277e+01;

  } else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) {

    *class = 'B';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of residual, for the (102X102X102) grid,
c   after 250 time steps, with  DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 3.5532672969982736e+03;
    xcrref[1] = 2.6214750795310692e+02;
    xcrref[2] = 8.8333721850952190e+02;
    xcrref[3] = 7.7812774739425265e+02;
    xcrref[4] = 7.3087969592545314e+03;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of solution error, for the (102X102X102)
c   grid, after 250 time steps, with  DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 1.1401176380212709e+02;
    xceref[1] = 8.1098963655421574;
    xceref[2] = 2.8480597317698308e+01;
    xceref[3] = 2.5905394567832939e+01;
    xceref[4] = 2.6054907504857413e+02;

/*--------------------------------------------------------------------
c   Reference value of surface integral, for the (102X102X102) grid,
c   after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 4.7887162703308227e+01;

  } else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) {

    *class = 'C';
    dtref = 2.0e+0;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of residual, for the (162X162X162) grid,
c   after 250 time steps, with  DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xcrref[0] = 1.03766980323537846e+04;
    xcrref[1] = 8.92212458801008552e+02;
    xcrref[2] = 2.56238814582660871e+03;
    xcrref[3] = 2.19194343857831427e+03;
    xcrref[4] = 1.78078057261061185e+04;

/*--------------------------------------------------------------------
c   Reference values of RMS-norms of solution error, for the (162X162X162)
c   grid, after 250 time steps, with  DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xceref[0] = 2.15986399716949279e+02;
    xceref[1] = 1.55789559239863600e+01;
    xceref[2] = 5.41318863077207766e+01;
    xceref[3] = 4.82262643154045421e+01;
    xceref[4] = 4.55902910043250358e+02;

/*--------------------------------------------------------------------
c   Reference value of surface integral, for the (162X162X162) grid,
c   after 250 time steps, with DT = 2.0d+0.0
--------------------------------------------------------------------*/
    xciref = 6.66404553572181300e+01;

  } else {
    /* Unrecognized grid / iteration count: no references to check. */
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c    verification test for residuals if gridsize is either 12X12X12 or
c    64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c    Compute the difference of solution values and the known reference values.
c    (relative differences; references are 1.0 placeholders for class 'U')
--------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {

    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);

  }
  xcidif = fabs((xci - xciref)/xciref);

/*--------------------------------------------------------------------
c    Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/

  if (*class != 'U') {
    printf("\n Verification being performed for class %1c\n", *class);
    printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
    /* A wrong time step invalidates the whole comparison. */
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *class = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }

  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf(" %2d %20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
	     m,xcr[m],xcrref[m],xcrdif[m]);
    } else {
      printf(" %2d %20.13e%20.13e%20.13e\n",
	     m,xcr[m],xcrref[m],xcrdif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }

  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf(" %2d %20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",
	     m,xce[m],xceref[m],xcedif[m]);
    } else {
      printf(" %2d %20.13e%20.13e%20.13e\n",
	     m,xce[m],xceref[m],xcedif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of surface integral\n");
  } else {
    printf(" Surface integral\n");
  }

  if (*class == 'U') {
    printf(" %20.13e\n", xci);
  } else if (xcidif > epsilon) {
    *verified = FALSE;
    printf(" FAILURE: %20.13e%20.13e%20.13e\n",
	   xci, xciref, xcidif);
  } else {
    printf(" %20.13e%20.13e%20.13e\n",
	   xci, xciref, xcidif);
  }

  /* Final verdict. */
  if (*class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}
|
Metric.h | //
// Created by Jin Zhu on 2020/2/18.
//
// #define R_BUILD
#ifndef SRC_METRICS_H
#define SRC_METRICS_H
#include "Data.h"
#include "Algorithm.h"
#include "model_fit.h"
// #include "path.h"
#include <vector>
#include <random>
#include <algorithm>
#include "utilities.h"
template <class T1, class T2, class T3, class T4>
// To do: calculate loss && all to one && lm poisson cox
/**
 * Model-selection helper: scores a fitted Algorithm either by an
 * information criterion or by K-fold cross-validation loss.
 *
 * Template parameters follow the library-wide convention:
 * T1 = response type, T2 = coefficient type, T3 = intercept type,
 * T4 = design-matrix type.
 */
class Metric
{
public:
    bool is_cv;  // true: evaluate by K-fold CV; false: by information criterion
    int Kfold;   // number of cross-validation folds
    int ic_type; // which criterion ic() computes (1..4); see ic()

    std::vector<Eigen::VectorXi> cv_initial_A;
    std::vector<Eigen::VectorXi> cv_initial_I;

    std::vector<Eigen::VectorXi> train_mask_list;
    std::vector<Eigen::VectorXi> test_mask_list;

    // Per-fold slices of the data, filled by set_cv_train_test_mask().
    std::vector<T4> train_X_list;
    std::vector<T4> test_X_list;
    std::vector<T1> train_y_list;
    std::vector<T1> test_y_list;
    std::vector<Eigen::VectorXd> train_weight_list;
    std::vector<Eigen::VectorXd> test_weight_list;

    // Warm-start state carried across calls, one entry per fold.
    std::vector<FIT_ARG<T2, T3>> cv_init_fit_arg;

    double ic_coef; // multiplier applied to the information-criterion penalty

    Metric() = default;

    Metric(int ic_type, double ic_coef = 1.0, bool is_cv = false, int Kfold = 5)
    {
        this->is_cv = is_cv;
        this->ic_type = ic_type;
        this->Kfold = Kfold;
        this->ic_coef = ic_coef;
        if (is_cv)
        {
            cv_init_fit_arg.resize(Kfold);
            train_X_list.resize(Kfold);
            test_X_list.resize(Kfold);
            train_y_list.resize(Kfold);
            test_y_list.resize(Kfold);
            test_weight_list.resize(Kfold);
            train_weight_list.resize(Kfold);
        }
    }

    // Reset every fold's warm-start arguments to zero coefficients,
    // empty active set and support size 0.
    void set_cv_init_fit_arg(int p, int M)
    {
        for (int i = 0; i < this->Kfold; i++)
        {
            T2 beta_init;
            T3 coef0_init;
            coef_set_zero(p, M, beta_init, coef0_init);
            Eigen::VectorXi A_init;
            Eigen::VectorXd bd_init;
            FIT_ARG<T2, T3> fit_arg(0, 0., beta_init, coef0_init, bd_init, A_init);
            cv_init_fit_arg[i] = fit_arg;
        }
    }

    // Partition the n samples into Kfold train/test index sets and slice
    // x, y and weight into the per-fold lists. If cv_fold_id is empty the
    // folds come from a fixed random shuffle (seed 123, reproducible);
    // otherwise samples sharing a fold id are grouped together.
    void set_cv_train_test_mask(Data<T1, T2, T3, T4> &data, int n, Eigen::VectorXi &cv_fold_id)
    {
        Eigen::VectorXi index_list(n);
        std::vector<int> index_vec((unsigned int)n);
        std::vector<Eigen::VectorXi> group_list((unsigned int)this->Kfold);
        for (int i = 0; i < n; i++)
        {
            index_vec[i] = i;
        }

        if (cv_fold_id.size() == 0)
        {
            // Deterministic shuffle so repeated runs build identical folds.
            std::mt19937 g(123);
            std::shuffle(index_vec.begin(), index_vec.end(), g);
            for (int i = 0; i < n; i++)
            {
                index_list(i) = index_vec[i];
            }

            // First Kfold-1 groups get floor(n/Kfold) samples; the last
            // group absorbs the remainder.
            int group_size = int(n / this->Kfold);
            for (int k = 0; k < (this->Kfold - 1); k++)
            {
                group_list[k] = index_list.segment(int(k * group_size), group_size);
            }
            group_list[this->Kfold - 1] = index_list.segment(int((this->Kfold - 1) * group_size),
                                                             n - int(int(this->Kfold - 1) * group_size));
        }
        else
        {
            // Given fold ids: order indices by id, then cut at id boundaries.
            auto rule = [cv_fold_id](int i, int j) -> bool
            {
                return cv_fold_id(i) < cv_fold_id(j);
            };
            std::sort(index_vec.begin(), index_vec.end(), rule);
            for (int i = 0; i < n; i++)
            {
                index_list(i) = index_vec[i];
            }

            int k = 0, st = 0, ed = 1;
            while (k < this->Kfold && ed < n)
            {
                int mask = cv_fold_id(index_list(st));
                while (ed < n && mask == cv_fold_id(index_list(ed))) ed++;
                group_list[k] = index_list.segment(st, ed - st);
                st = ed; ed++; k++;
            }
        }

        for (int k = 0; k < this->Kfold; k++)
        {
            std::sort(group_list[k].data(), group_list[k].data() + group_list[k].size());
        }

        // cv train-test partition: fold k tests on group k, trains on the rest.
        std::vector<Eigen::VectorXi> train_mask_list_tmp((unsigned int)this->Kfold);
        std::vector<Eigen::VectorXi> test_mask_list_tmp((unsigned int)this->Kfold);
        for (int k = 0; k < this->Kfold; k++)
        {
            int train_x_size = n - group_list[k].size();
            // assemble the train mask from every other group's indices
            Eigen::VectorXi train_mask(train_x_size);
            int i = 0;
            for (int j = 0; j < this->Kfold; j++)
            {
                if (j != k)
                {
                    for (int s = 0; s < group_list[j].size(); s++)
                    {
                        train_mask(i) = group_list[j](s);
                        i++;
                    }
                }
            }
            std::sort(train_mask.data(), train_mask.data() + train_mask.size());
            train_mask_list_tmp[k] = train_mask;
            test_mask_list_tmp[k] = group_list[k];

            slice(data.x, train_mask, this->train_X_list[k]);
            slice(data.x, group_list[k], this->test_X_list[k]);
            slice(data.y, train_mask, this->train_y_list[k]);
            slice(data.y, group_list[k], this->test_y_list[k]);
            slice(data.weight, train_mask, this->train_weight_list[k]);
            slice(data.weight, group_list[k], this->test_weight_list[k]);
        }
        this->train_mask_list = train_mask_list_tmp;
        this->test_mask_list = test_mask_list_tmp;
    };

    // Information criterion of a fitted algorithm.
    // train_n = training sample size, M = response dimension,
    // N = number of groups. The loss term is n*log(train_loss) for
    // model types 1 and 5, and 2*train_loss otherwise.
    double ic(int train_n, int M, int N, Algorithm<T1, T2, T3, T4> *algorithm)
    {
        double loss;
        if (algorithm->model_type == 1 || algorithm->model_type == 5)
        {
            loss = train_n * log(algorithm->get_train_loss());
        }
        else
        {
            loss = 2 * algorithm->get_train_loss();
        }

        if (ic_type == 1)
        {
            // AIC-style penalty: 2 * df
            return loss + 2.0 * algorithm->get_effective_number();
        }
        else if (ic_type == 2)
        {
            // NOTE(review): a BIC-style penalty would normally use
            // log(train_n), not train_n itself -- confirm this is intended.
            return loss + this->ic_coef * (double(train_n)) * algorithm->get_effective_number();
        }
        else if (ic_type == 3)
        {
            return loss + this->ic_coef * log(double(N)) * log(log(double(train_n))) * algorithm->get_effective_number();
        }
        else if (ic_type == 4)
        {
            return loss + this->ic_coef * (log(double(train_n)) + 2 * log(double(N))) * algorithm->get_effective_number();
        }
        else
            return 0;
    };

    // Negative log-likelihood of the fitted model evaluated on the given
    // (typically held-out) data, restricted to the fitted active set A.
    double neg_loglik_loss(T4 &train_x, T1 &train_y, Eigen::VectorXd &train_weight, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int train_n, int p, int N, Algorithm<T1, T2, T3, T4> *algorithm)
    {
        Eigen::VectorXi A = algorithm->get_A_out();
        T2 beta = algorithm->get_beta();
        T3 coef0 = algorithm->get_coef0();

        // Expand group-level active set A into variable indices, then take
        // the matching columns of X and entries of beta.
        Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, p, N);
        T4 X_A = X_seg(train_x, train_n, A_ind);
        T2 beta_A;
        slice(beta, A_ind, beta_A);

        double L0 = algorithm->neg_loglik_loss(X_A, train_y, train_weight, beta_A, coef0, A, g_index, g_size);
        return L0;
    }

    // Fit on the full data, then score the fit: K-fold CV loss when is_cv,
    // otherwise the information criterion. fit_arg supplies (and, on warm
    // start, receives back) the initial coefficients / active set.
    double fit_and_evaluate_in_metric(Algorithm<T1, T2, T3, T4> *algorithm, Data<T1, T2, T3, T4> &data, std::vector<Algorithm<T1, T2, T3, T4> *> algorithm_list, FIT_ARG<T2, T3> &fit_arg)
    {
        int N = data.g_num;
        algorithm->update_sparsity_level(fit_arg.support_size);
        algorithm->update_lambda_level(fit_arg.lambda);
        algorithm->update_beta_init(fit_arg.beta_init);
        algorithm->update_bd_init(fit_arg.bd_init);
        algorithm->update_coef0_init(fit_arg.coef0_init);
        algorithm->update_A_init(fit_arg.A_init, N);

        algorithm->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p, data.g_num, data.status, algorithm->Sigma);

        if (algorithm->get_warm_start())
        {
            fit_arg.beta_init = algorithm->get_beta();
            fit_arg.coef0_init = algorithm->get_coef0();
            fit_arg.bd_init = algorithm->get_bd();
        }

        if (is_cv)
        {
            Eigen::VectorXi g_index = data.g_index;
            Eigen::VectorXi g_size = data.g_size;
            int p = data.p;
            Eigen::VectorXd loss_list(this->Kfold);

#pragma omp parallel for
            for (int k = 0; k < this->Kfold; k++)
            {
                int test_n = this->test_mask_list[k].size();
                int train_n = this->train_mask_list[k].size();

                algorithm_list[k]->update_sparsity_level(fit_arg.support_size);
                algorithm_list[k]->update_lambda_level(fit_arg.lambda);
                if (algorithm_list[k]->get_warm_start())
                {
                    algorithm_list[k]->update_beta_init(this->cv_init_fit_arg[k].beta_init);
                    algorithm_list[k]->update_bd_init(this->cv_init_fit_arg[k].bd_init);
                    algorithm_list[k]->update_coef0_init(this->cv_init_fit_arg[k].coef0_init);
                    algorithm_list[k]->update_A_init(this->cv_init_fit_arg[k].A_init, N);
                }

                algorithm_list[k]->fit(this->train_X_list[k], this->train_y_list[k], this->train_weight_list[k], g_index, g_size, train_n, p, N, data.status, algorithm_list[k]->Sigma);

                if (algorithm_list[k]->get_warm_start())
                {
                    // Bug fix: the warm-start state must come from the fold's
                    // own algorithm_list[k]; the previous code read the shared
                    // full-data `algorithm`, which both stored the wrong fit
                    // and raced across OpenMP threads.
                    this->cv_init_fit_arg[k].beta_init = algorithm_list[k]->get_beta();
                    this->cv_init_fit_arg[k].coef0_init = algorithm_list[k]->get_coef0();
                    this->cv_init_fit_arg[k].bd_init = algorithm_list[k]->get_bd();
                }

                loss_list(k) = this->neg_loglik_loss(this->test_X_list[k], this->test_y_list[k], this->test_weight_list[k], g_index, g_size, test_n, p, N, algorithm_list[k]);
            }

            return loss_list.mean();
        }
        else
        {
            return this->ic(data.n, data.M, data.g_num, algorithm);
        }
    };
};
#endif //SRC_METRICS_H |
matrix.c |
#include "matrix.h"
/* Value/index pair used by sort1d(): sorts by the pointed-to value while
 * remembering the element's original position. */
typedef struct {
	double* a; /* pointer to the value being compared (not owned) */
	int b;     /* original index of that value */
} vipair;
double macheps() {
	/* Machine epsilon for double: the smallest power of two eps such
	 * that 1.0 + eps compares greater than 1.0. */
	double eps = 2.0;
	do {
		eps /= 2.0;
	} while ((eps / 2.0 + 1.0) > 1.0);
	return eps;
}
double pmax(double a, double b) {
	/* Larger of two doubles. */
	return (a > b) ? a : b;
}
double pmin(double a, double b) {
	/* Smaller of two doubles. */
	return (a < b) ? a : b;
}
int imax(int a, int b) {
	/* Larger of two ints. */
	return (a > b) ? a : b;
}
int imin(int a, int b) {
	/* Smaller of two ints. */
	return (a < b) ? a : b;
}
double signx(double x) {
	/* Sign of x as +/-1.0; zero maps to +1.0. */
	return (x >= 0.) ? 1.0 : -1.0;
}
double l2norm(double *vec, int N) {
double l2, sum;
int i;
sum = 0.;
for (i = 0; i < N; ++i) {
sum += vec[i] * vec[i];
}
l2 = sqrt(sum);
return l2;
}
int compare (const void* ind1, const void* ind2)
{
	/* qsort comparator for vipair: orders by the pointed-to value,
	 * descending (larger values sort first). */
	double lhs = *((vipair *)ind1)->a;
	double rhs = *((vipair *)ind2)->a;
	if (lhs > rhs)
		return -1;
	if (lhs < rhs)
		return 1;
	return 0;
}
void sort1d(double* v,int N, int* pos)
{
vipair* val = NULL;
int i;
if (N <= 0)
return;
val = malloc(sizeof(vipair) * N);
for (i = 0; i < N; ++i) {
val[i].a = &v[i];
val[i].b = i;
}
qsort(val, N, sizeof(vipair), compare);
for (i = 0; i < N; ++i)
pos[i] = val[i].b;
free(val);
}
double array_max_abs(double *array,int N) {
	/* Largest absolute value in array[0..N-1]; 0.0 for empty input. */
	double best = 0.0;
	int k;
	for (k = 0; k < N; ++k) {
		double mag = fabs(array[k]);
		if (mag > best) {
			best = mag;
		}
	}
	return best;
}
double array_max(double *array,int N) {
	/* Maximum element of array[0..N-1]; assumes N >= 1
	 * (array[0] is read unconditionally). */
	int k;
	double best = array[0];
	for (k = 1; k < N; ++k) {
		if (array[k] > best) {
			best = array[k];
		}
	}
	return best;
}
double array_min(double *array,int N) {
	/* Minimum element of array[0..N-1]; assumes N >= 1
	 * (array[0] is read unconditionally). */
	int k;
	double best = array[0];
	for (k = 1; k < N; ++k) {
		if (array[k] < best) {
			best = array[k];
		}
	}
	return best;
}
void dtranspose(double *sig, int rows, int cols,double *col) {
	/* Transpose sig (rows x cols, row-major) into col (cols x rows,
	 * row-major) by walking diagonals; ud tracks the length of the
	 * current diagonal, capped at min(rows, cols). */
	int max,ud,i,k;
	if (rows >= cols) {
		max = cols;
	} else {
		max = rows;
	}
	ud = 0;
	/* i indexes the diagonal, from -rows+1 up to cols-1. */
	for (i= -rows + 1; i < cols; i++) {
		if (i <= 0) {
			/* Diagonals starting on the left edge grow until capped. */
			ud++;
			if (ud >= max)
				ud = max;
			for (k = 0; k < ud; k++) {
				col[k*rows+k-i] = sig[(k-i)*cols+k];
			}
		} else {
			/* Diagonals starting on the top edge shrink once they
			 * pass the bottom-right corner. */
			if (i - cols + rows > 0) {
				ud--;
				if (ud >= max)
					ud = max;
			}
			for (k = 0; k < ud; k++) {
				col[(k+i)*rows+k] = sig[k*cols+k+i];
			}
		}
	}
}
void stranspose(double *sig, int rows, int cols,double *col) {
	/* Straightforward transpose: col (cols x rows) = sig (rows x cols)^T,
	 * both row-major. Rows are processed independently, so the outer
	 * loop is parallelized. */
	int r,c;
	int rowbase,outpos;
	#pragma omp parallel for private(r,c,rowbase,outpos)
	for (r = 0; r < rows; r++) {
		rowbase = r * cols;
		outpos = 0;
		for (c = 0; c < cols; c++) {
			col[outpos + r] = sig[rowbase + c];
			outpos += rows;
		}
	}
}
void rtranspose(double *m, int rows, int cols,double *n, int r, int c) {
	/* Cache-oblivious transpose: recursively split the larger dimension
	 * until a tile fits within BLOCKSIZE, then transpose that tile
	 * directly. m is the source (row stride c), n the destination
	 * (row stride r); r and c stay fixed across the recursion. */
	register int i,j;
	int rm,cm;
	int rm1,cm1,rm2,cm2;
	int block;
	block = (int) BLOCKSIZE;
	if (rows <= block && cols <= block) {
		/* Base case: element-wise transpose of the tile. */
		for (i = 0; i < rows; ++i) {
			for (j = 0; j < cols; ++j) {
				n[i+j*r] = m[j+i*c];
				//cout << *(n+i+j*r) << " ";
			}
		}
		//cout << endl;
	} else if (cols >= rows) {
		/* Split columns; right half starts cm1 doubles into each source row. */
		rm = rows;
		cm1 = (int) ceil((double) cols/2.0);
		cm2 = cols - cm1;
		rtranspose(m,rm,cm1,n,r,c);
		rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c);
	} else if (rows > cols) {
		/* Split rows; lower half starts rm1 source rows down. */
		rm1 = (int) ceil((double) rows/2.0);
		rm2 = rows - rm1;
		cm = cols;
		rtranspose(m,rm1,cm,n,r,c);
		rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c);
	}
}
void ctranspose(double *sig, int rows, int cols,double *col) {
	/* Choose a transpose strategy: the recursive blocked version when
	 * either dimension reaches BLOCKSIZE, the simple version otherwise. */
	const int block = (int) BLOCKSIZE;
	if (rows >= block || cols >= block) {
		rtranspose(sig,rows,cols,col,rows,cols);
	} else {
		stranspose(sig,rows,cols,col);
	}
}
void mtranspose(double *sig, int rows, int cols,double *col) {
	/* Top-level transpose dispatcher: only matrices large in BOTH
	 * dimensions take the cache-aware path. */
	const int threshold = (int) BLOCKSIZE * 16;
	if (rows >= threshold && cols >= threshold) {
		ctranspose(sig,rows,cols,col);
	} else {
		stranspose(sig,rows,cols,col);
	}
}
void itranspose(double *A, int M, int N) {
	/* In-place transpose of A (M x N, row-major) into N x M. The square
	 * case swaps across the diagonal. The rectangular cases stash the
	 * excess rows/columns in a temporary buffer, transpose the leading
	 * square block, realign rows with memmove, then splice the buffer
	 * back in transposed order. */
	int i, j, p, iter;
	double *buf;
	double temp;
	if (M == N) {
		// M == N
		for (i = 0; i < N; ++i) {
			for (j = i + 1; j < N; ++j) {
				temp = A[i + j*N];
				A[i + j*N] = A[j + i*N];
				A[j + i*N] = temp;
			}
		}
	} else if (M > N) {
		/* p extra rows beyond the leading N x N block. */
		p = M - N;
		buf = (double*)malloc(sizeof(double)* p * N);
		/* Save the bottom p x N strip. */
		memcpy(buf, A + N * N, sizeof(*A)*p*N);
		/* Transpose the leading N x N block in place. */
		for (i = 0; i < N; ++i) {
			for (j = i + 1; j < N; ++j) {
				temp = A[i + j*N];
				A[i + j*N] = A[j + i*N];
				A[j + i*N] = temp;
			}
		}
		/* Spread the N rows out to the new row stride M (back to front
		 * so memmove never overwrites unread data). */
		for (i = N - 1; i >= 1; --i) {
			memmove(A + i*M, A + i*N, sizeof(*A)*M);
		}
		/* Append the saved strip as the trailing p columns. */
		for (i = 0; i < N; ++i) {
			iter = N + i * M;
			for (j = 0; j < p; ++j) {
				A[iter + j] = buf[j*N + i];
			}
		}
		free(buf);
	}
	else if (M < N) {
		/* p extra columns beyond the leading M x M block. */
		p = N - M;
		buf = (double*)malloc(sizeof(double)* p * M);
		/* Save the right M x p strip, already transposed into buf. */
		for (i = 0; i < M; ++i) {
			iter = M + i*N;
			for (j = 0; j < p; ++j) {
				buf[j*M + i] = A[iter + j];
			}
		}
		/* Pack the rows down to the new row stride M. */
		for (i = 1; i < M; ++i) {
			memmove(A + i*M, A + i * N, sizeof(*A)*M);
		}
		/* Transpose the leading M x M block in place. */
		for (i = 0; i < M; ++i) {
			for (j = i + 1; j < M; ++j) {
				temp = A[i + j*M];
				A[i + j*M] = A[j + i*M];
				A[j + i*M] = temp;
			}
		}
		/* Append the saved strip as the trailing p rows. */
		memcpy(A + M*M, buf, sizeof(*A)*p*M);
		free(buf);
	}
}
void mdisplay(double *A, int row, int col) {
	/* Pretty-print a row-major matrix, one "R<i>: ... :R<i>" line per row. */
	int r,c;
	printf("\n MATRIX Order : %d X %d \n \n",row,col);
	for (r = 0; r < row; r++) {
		printf("R%d: ",r);
		for (c = 0; c < col; c++) {
			printf("%g ",A[r*col + c]);
		}
		printf(":R%d \n",r);
	}
}
void madd(double* A, double* B, double* C,int rows,int cols) {
	/*
	 * C = A + B, elementwise; all three matrices are rows x cols.
	 */
	int k;
	const int total = rows * cols;
	#pragma omp parallel for
	for (k = 0; k < total; ++k) {
		C[k] = A[k] + B[k];
	}
}
void msub(double* A, double* B, double* C,int rows,int cols) {
	/*
	 * C = A - B, elementwise; all three matrices are rows x cols.
	 */
	int k;
	const int total = rows * cols;
	#pragma omp parallel for
	for (k = 0; k < total; ++k) {
		C[k] = A[k] - B[k];
	}
}
void scale(double *A, int rows, int cols, double alpha) {
	/*
	 * A = alpha * A, elementwise. A is overwritten in place.
	 */
	int k;
	const int total = rows * cols;
	#pragma omp parallel for
	for (k = 0; k < total; ++k) {
		A[k] = alpha * A[k];
	}
}
void nmult(double* A, double* B, double* C,int ra,int ca, int cb) {
	/*
	 * Naive triple-loop multiply: C = A * B where A is ra x ca,
	 * B is ca x cb and C is ra x cb, all row-major.
	 */
	int i,j,k;
	int arow,crow,cidx;
	double acc;
	#pragma omp parallel for private(i,j,k,arow,crow,cidx,acc)
	for (i = 0; i < ra; ++i) {
		arow = i * ca;
		crow = i * cb;
		for (j = 0; j < cb; ++j) {
			cidx = crow + j;
			acc = 0.;
			for (k = 0; k < ca; ++k) {
				acc += A[arow + k] * B[k * cb + j];
			}
			C[cidx] = acc;
		}
	}
}
void tmult(double* A, double* B, double* C,int ra,int ca, int cb) {
	/*
	 * C = A * B computed against a transposed copy of B so both inner
	 * operands are read contiguously. A is ra x ca, B is ca x cb,
	 * C is ra x cb (row-major).
	 */
	int i,j,k;
	int arow,cidx;
	double acc;
	double *BT = (double*) malloc(sizeof(double) * ca * cb);
	mtranspose(B,ca,cb,BT);
	#pragma omp parallel for private(i,j,k,arow,cidx,acc)
	for (i = 0; i < ra; ++i) {
		arow = i * ca;
		for (j = 0; j < cb; ++j) {
			cidx = i * cb + j;
			acc = 0.;
			for (k = 0; k < ca; ++k) {
				acc += A[arow + k] * BT[j * ca + k];
			}
			C[cidx] = acc;
		}
	}
	free(BT);
}
void recmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
	/* Recursive blocked multiply-ACCUMULATE: C += A * B where A is m x n,
	 * B is n x p and C is m x p. NOTE(review): in the leaf loop A is
	 * indexed with row stride sB and both B and C with row stride sC, so
	 * sA is effectively unused -- this matches how rmult() calls it
	 * (sB = n, sC = p), but the parameter naming is misleading; confirm
	 * before reusing with other strides. */
	int m2,n2,p2;
	register int i,j,k;
	int u,v,t;
	if (m + n + p <= CUTOFF) {
		//#pragma omp parallel for private(i,j,k,v,u,t)
		for (i = 0; i < m; ++i) {
			for (j = 0; j < p; ++j) {
				v = i * sB;
				u = i * sC;
				t = j + u;
				for (k = 0; k < n;++k) {
					C[t] += A[k + v] * B[j + k * sC];
				}
			}
		}
	} else if (m >= n && m >= p) {
		/* Split the row dimension of A and C. */
		m2 = (int) ceil((double) m / 2.0);
		recmult(A,B,C,m2,n,p,sA,sB,sC);
		recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC);
	} else if (n >= m && n >= p) {
		/* Split the shared dimension; both halves accumulate into C. */
		n2 = (int) ceil((double) n / 2.0);
		recmult(A,B,C,m,n2,p,sA,sB,sC);
		recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC);
	} else if (p >= m && p >= n) {
		/* Split the column dimension of B and C. */
		p2 = (int) ceil((double) p / 2.0);
		recmult(A,B,C,m,n,p2,sA,sB,sC);
		recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC);
	}
}
void rmult(double* A, double* B, double* C,int m,int n, int p) {
	/*
	 * C = A * B via the recursive kernel. C is zeroed first because
	 * recmult() accumulates into its output.
	 */
	int k;
	const int total = m * p;
	for (k = 0; k < total; ++k) {
		C[k] = 0.;
	}
	recmult(A,B,C,m,n,p,m,n,p);
}
int findrec(int *a, int *b, int *c) {
	/*
	 * Count how many halvings bring a+b+c at or under CUTOFF, then round
	 * each dimension up to a multiple of 2^rec so a halving recursion
	 * splits evenly. Returns the recursion depth.
	 */
	int rec = 0;
	double mul = 1.;
	double da = (double) *a;
	double db = (double) *b;
	double dc = (double) *c;
	while (da + db + dc > (double) CUTOFF) {
		rec++;
		mul *= 2;
		da = ceil(da/2.);
		db = ceil(db/2.);
		dc = ceil(dc/2.);
	}
	*a = (int) da * mul;
	*b = (int) db * mul;
	*c = (int) dc * mul;
	return rec;
}
void add_zero_pad(double *X, int rows, int cols, int zrow, int zcol,double *Y) {
	/*
	 * Copy X (rows x cols) into Y ((rows+zrow) x (cols+zcol)),
	 * zero-filling the extra right columns and bottom rows.
	 */
	const int outrows = rows + zrow;
	const int outcols = cols + zcol;
	int i,j;
	for (i = 0; i < outrows; ++i) {
		for (j = 0; j < outcols; ++j) {
			if (i < rows && j < cols) {
				Y[i*outcols + j] = X[i*cols + j];
			} else {
				Y[i*outcols + j] = 0.;
			}
		}
	}
}
void remove_zero_pad(double *Y, int rows, int cols, int zrow, int zcol,double *Z) {
	/*
	 * Extract the top-left (rows-zrow) x (cols-zcol) corner of
	 * Y (rows x cols) into Z -- the inverse of add_zero_pad().
	 */
	const int outr = rows - zrow;
	const int outc = cols - zcol;
	int i,j;
	for (i = 0; i < outr; ++i) {
		for (j = 0; j < outc; ++j) {
			Z[i*outc + j] = Y[i*cols + j];
		}
	}
}
void madd_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
	/*
	 * C = A + B over a rows x cols block, where sA/sB/sC are the row
	 * strides of A/B/C (so sub-blocks of larger matrices can be added).
	 */
	int i,j;
	for (i = 0; i < rows; ++i) {
		const int abase = i * sA;
		const int bbase = i * sB;
		const int cbase = i * sC;
		for (j = 0; j < cols; ++j) {
			C[cbase + j] = A[abase + j] + B[bbase + j];
		}
	}
}
void msub_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
	/*
	 * C = A - B over a rows x cols block, where sA/sB/sC are the row
	 * strides of A/B/C (so sub-blocks of larger matrices can be used).
	 */
	int i,j;
	for (i = 0; i < rows; ++i) {
		const int abase = i * sA;
		const int bbase = i * sB;
		const int cbase = i * sC;
		for (j = 0; j < cols; ++j) {
			C[cbase + j] = A[abase + j] - B[bbase + j];
		}
	}
}
void rmadd_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
	/*
	 * Recursive strided add: C = A + B over a rows x cols block; sA/sB/sC
	 * are the row strides of A/B/C, and p only feeds the recursion cutoff.
	 * Bug fix: the quadrant recursion previously offset A by rows*sB and
	 * B by rows*sC (strides of the wrong matrices, apparently copied from
	 * the multiply kernel); each matrix must advance by its own stride.
	 * NOTE(review): the halving drops the last row/column when rows or
	 * cols is odd -- callers appear to use power-of-two padded sizes, but
	 * confirm before general use.
	 */
	int i,j,u,v,w;
	if (rows + cols + p <= CUTOFF) {
		/* Leaf: plain strided elementwise add. */
		for (i = 0; i < rows; ++i) {
			u = i * sC;
			v = i * sA;
			w = i * sB;
			for(j = 0; j < cols;j++) {
				C[j + u] = A[j + v] + B[j + w];
			}
		}
	} else {
		/* Recurse on the four half-size quadrants. */
		rows/=2;cols/=2;p/=2;
		rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC);
		rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
		rmadd_stride(A + rows *sA,B + rows *sB,C + rows *sC,rows,cols,p,sA,sB,sC);
		rmadd_stride(A + rows *sA + cols,B + rows *sB + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
	}
}
void rmsub_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
	/*
	 * Recursive strided subtract: C = A - B over a rows x cols block;
	 * sA/sB/sC are the row strides of A/B/C, and p only feeds the
	 * recursion cutoff.
	 * Bug fix: the quadrant recursion previously offset A by rows*sB and
	 * B by rows*sC (strides of the wrong matrices, apparently copied from
	 * the multiply kernel); each matrix must advance by its own stride.
	 * NOTE(review): the halving drops the last row/column when rows or
	 * cols is odd -- callers appear to use power-of-two padded sizes, but
	 * confirm before general use.
	 */
	int i,j,u,v,w;
	if (rows + cols + p <= CUTOFF) {
		/* Leaf: plain strided elementwise subtract. */
		for (i = 0; i < rows; ++i) {
			u = i * sC;
			v = i * sA;
			w = i * sB;
			for(j = 0; j < cols;j++) {
				C[j + u] = A[j + v] - B[j + w];
			}
		}
	} else {
		/* Recurse on the four half-size quadrants. */
		rows/=2;cols/=2;p/=2;
		rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC);
		rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
		rmsub_stride(A + rows *sA,B + rows *sB,C + rows *sC,rows,cols,p,sA,sB,sC);
		rmsub_stride(A + rows *sA + cols,B + rows *sB + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
	}
}
void srecmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
	/* Strassen recursive multiply: C = A * B with A m x n (row stride sA),
	 * B n x p (stride sB), C m x p (stride sC). Below the CUTOFF a naive
	 * triple loop is used; otherwise the matrices are split into
	 * quadrants and the seven Strassen products m1..m7 are combined.
	 * m2, m5, m6, m7 are computed directly into C's quadrants to save
	 * allocations. Assumes m, n and p are even at every recursion level
	 * (guaranteed by the findrec/add_zero_pad padding done in smult). */
	register int i,j,k;
	int u,v,t;
	double sum;
	double *A1,*B1;
	double *a11,*a12,*a21,*a22;
	double *b11,*b12,*b21,*b22;
	double *c11,*c12,*c21,*c22;
	double *m1,*m2,*m3,*m4,*m5,*m6,*m7;
	int sm1,sm2,sm3,sm4,sm5,sm6,sm7;
	int sA1,sB1;
	if (m + n + p <= CUTOFF) {
		/* Base case: naive multiply, accumulating each dot product in
		 * a scalar before the single store to C. */
		for (i = 0; i < m; ++i) {
			for (j = 0; j < p; ++j) {
				v = i * sA;
				u = i * sC;
				t = j + u;
				sum = 0.;
				for (k = 0; k < n;++k) {
					sum += A[k + v] * B[j + k * sB];
				}
				C[t] = sum;
			}
		}
	} else {
		/* Halve each dimension; the x11/x12/x21/x22 pointers address the
		 * four quadrants of A, B and C within their original strides. */
		m/=2;n/=2;p/=2;
		// A size mXn, C size mXp
		a11 = A;
		a12 = A + n;
		a21 = A + m * sA;
		a22 = A + n + m * sA;
		//B size nXp
		b11 = B;
		b12 = B + p;
		b21 = B + n * sB;
		b22 = B + p + n * sB;
		//C size mXp
		c11 = C;
		c12 = C + p;
		c21 = C + m * sC;
		c22 = C + p + m * sC;
		// m matrices have dimension m X p each. See http://en.wikipedia.org/wiki/Strassen_algorithm
		m1 = (double*) malloc(sizeof(double) *m * p);
		sm1 = p;
		m3 = (double*) malloc(sizeof(double) *m * p);
		sm3 = p;
		m4 = (double*) malloc(sizeof(double) *m * p);
		sm4 = p;
		/* m2, m5, m6, m7 alias C's quadrants (stride sC). */
		m2 = c21;
		sm2 = sC;
		m5 = c12;
		sm5 = sC;
		m6 = c22;
		sm6 = sC;
		m7 = c11;
		sm7 = sC;
		//m1 = (a11 + a22) * (b11 + b22)
		sA1 = n;
		sB1 = p;
		A1 = (double*) malloc(sizeof(double) * m * n);
		B1 = (double*) malloc(sizeof(double) * n * p);
		madd_stride(a11,a22,A1,m,n,sA,sA,sA1);
		madd_stride(b11,b22,B1,n,p,sB,sB,sB1);
		srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1);
		free(A1);
		free(B1);
		//m2 = (a21 + a22) * b11
		A1 = (double*) malloc(sizeof(double) * m * n);
		madd_stride(a21,a22,A1,m,n,sA,sA,sA1);
		srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2);
		free(A1);
		//m3 = a11 * (b12 - b22)
		B1 = (double*) malloc(sizeof(double) * n * p);
		//rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2);
		msub_stride(b12,b22,B1,n,p,sB,sB,sB1);
		srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3);
		free(B1);
		//m4 = a22 * (b21 - b11)
		B1 = (double*) malloc(sizeof(double) * n * p);
		//rmsub_stride(B + p,B + p + n * sC,B1,n,p,m,sC,sC,sC/2);
		msub_stride(b21,b11,B1,n,p,sB,sB,sB1);
		srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4);
		free(B1);
		//m5 = (a11 + a12) * b22
		A1 = (double*) malloc(sizeof(double) * m * n);
		madd_stride(a11,a12,A1,m,n,sA,sA,sA1);
		srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5);
		free(A1);
		//m6 = (a21 - a11) * (b11 + b12)
		A1 = (double*) malloc(sizeof(double) * m * n);
		B1 = (double*) malloc(sizeof(double) * n * p);
		msub_stride(a21,a11,A1,m,n,sA,sA,sA1);
		madd_stride(b11,b12,B1,n,p,sB,sB,sB1);
		srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6);
		free(A1);
		free(B1);
		//m7 = (a12 - a22) * (b21 + b22)
		A1 = (double*) malloc(sizeof(double) * m * n);
		B1 = (double*) malloc(sizeof(double) * n * p);
		msub_stride(a12,a22,A1,m,n,sA,sA,sA1);
		madd_stride(b21,b22,B1,n,p,sB,sB,sB1);
		srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7);
		free(A1);
		free(B1);
		// c11 = m1 + m4 - m5 + m7 (m7 already lives in c11)
		A1 = (double*) malloc(sizeof(double) * m * p);
		sA1 = p;
		madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7);
		msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1);
		madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7);
		free(A1);
		// c22 = m1 - m2 + m3 + m6 (m6 already lives in c22)
		A1 = (double*) malloc(sizeof(double) * m * p);
		sA1 = p;
		madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6);
		msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1);
		madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6);
		free(A1);
		//c12 = m3 + m5 (m5 already lives in c12)
		madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5);
		//c21 = m2 + m4 (m2 already lives in c21)
		madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2);
		free(m1);
		free(m3);
		free(m4);
	}
}
/* Strassen multiply C = A*B for arbitrary sizes: pads each operand with
 * zeros up to recursion-friendly dimensions (findrec), runs the recursive
 * Strassen kernel, then strips the padding from the result.
 * A: m x n, B: n x p, C: m x p (row-major).
 *
 * Fix: removed the unused `nrec` result and the `P` buffer that was
 * allocated and freed without ever being used. */
void smult(double* A, double* B, double* C,int m,int n, int p) {
	int a,b,c;
	double *X,*Y,*Z;
	a = m;
	b = n;
	c = p;
	/* findrec rounds a,b,c up to sizes the recursion can halve cleanly. */
	findrec(&a,&b,&c);
	X = (double*) malloc(sizeof(double) * a * b);
	Y = (double*) malloc(sizeof(double) * b * c);
	Z = (double*) malloc(sizeof(double) * a * c);
	add_zero_pad(A,m,n,a-m,b-n,X);
	add_zero_pad(B,n,p,b-n,c-p,Y);
	srecmult(X,Y,Z,a,b,c,b,c,c);
	remove_zero_pad(Z,a,c,a-m,c-p,C);
	free(X);
	free(Y);
	free(Z);
}
/* General matrix multiply C = A*B (A: m x n, B: n x p, C: m x p).
 * Small problems go straight to the naive kernel; larger ones use the
 * padded Strassen path. */
void mmult(double* A, double* B, double* C,int m,int n, int p) {
	const int problem_size = m + n + p;
	if (problem_size > CUTOFF/2) {
		smult(A,B,C,m,n,p);
	} else {
		nmult(A,B,C,m,n,p);
	}
}
/* In-place LU decomposition with partial (row) pivoting.
 * A    : N x N row-major matrix; overwritten with the packed factors —
 *        U in the upper triangle, unit-lower L below the diagonal.
 * ipiv : length-N output; row i of the factored matrix came from original
 *        row ipiv[i].
 * Returns 0. A zero pivot column is skipped (singular input is not an
 * error here; linsolve detects it later).
 *
 * Fix: the pivot search compared against the RAW entry (`mval = A[j*N+k]`)
 * instead of its magnitude, so after a negative candidate every later row
 * compared larger and the final, not the largest-magnitude, row was
 * chosen. Now the magnitude is stored. */
static int pludecomp(double *A,int N,int *ipiv) {
	int k,j,l,c1,c2,mind,tempi;
	double ld,mult,mval,temp;
	for(k=0;k < N;++k)
		ipiv[k] = k;
	for(k = 0; k < N-1; ++k) {
		/* Find the largest-magnitude entry in column k, rows k..N-1. */
		mval = fabs(A[k*N + k]);
		mind = k;
		for (j=k+1; j < N;++j) {
			if (mval < fabs(A[j*N + k])) {
				mval = fabs(A[j*N + k]); /* fixed: keep the magnitude */
				mind = j;
			}
		}
		if ( mind != k) {
			/* Swap rows k and mind, and record the permutation. */
			c1 = k *N;
			c2 = mind * N;
			tempi = ipiv[mind];
			ipiv[mind] = ipiv[k];
			ipiv[k] = tempi;
			for (j = 0; j < N;j++) {
				temp = A[c1 + j];
				*(A + c1 + j) = *(A + c2 + j);
				*(A + c2 + j) = temp;
			}
		}
		c2 = k*N;
		ld = A[c2 + k];
		if (ld != 0.) {
			/* Eliminate below the pivot; store multipliers in place of L. */
			for (j = k+1; j < N; ++j) {
				c1 = j*N;
				mult = A[c1+k] /= ld;
				for(l = k+1; l < N; ++l) {
					A[c1+l] -= mult * A[c2 + l];
				}
			}
		}
	}
	return 0;
}
/* Public entry point for square LU factorization with partial pivoting.
 * Thin wrapper over pludecomp; see that routine for the packed layout. */
void ludecomp(double *A,int N,int *ipiv) {
	(void) pludecomp(A,N,ipiv);
}
/* Rectangular LU decomposition with partial pivoting, in place.
 * A    : M x N row-major; overwritten with packed L (unit lower) and U.
 * ipiv : length-M permutation record (row i came from original row ipiv[i]).
 * Square inputs delegate to pludecomp. Returns 0.
 *
 * Fixes: (1) the pivot search stored the raw entry instead of its
 * magnitude (same bug as pludecomp) in both the tall and wide branches;
 * (2) `ipiv[k] = (double) k` cast an int to double only to store it back
 * into an int array; (3) the `k < N` clause in the tall-case pivot test
 * was always true inside `for (k = 0; k < N; ...)`. */
int rludecomp(double *A, int M, int N, int *ipiv) {
	int k, j, l, c1, c2, mind, tempi;
	double ld, mult, mval, temp;
	for (k = 0; k < M; ++k)
		ipiv[k] = k;
	if (M > N) {
		/* Tall matrix: eliminate all N columns, pivoting over M rows. */
		for (k = 0; k < N; ++k) {
			mval = fabs(A[k*N + k]);
			mind = k;
			for (j = k + 1; j < M; ++j) {
				if (mval < fabs(A[j*N + k])) {
					mval = fabs(A[j*N + k]); /* fixed: keep the magnitude */
					mind = j;
				}
			}
			if (mind != k) {
				c1 = k *N;
				c2 = mind * N;
				tempi = ipiv[mind];
				ipiv[mind] = ipiv[k];
				ipiv[k] = tempi;
				for (j = 0; j < N; j++) {
					temp = A[c1 + j];
					*(A + c1 + j) = *(A + c2 + j);
					*(A + c2 + j) = temp;
				}
			}
			c2 = k*N;
			ld = A[c2 + k];
			if (ld != 0.) {
				for (j = k + 1; j < M; ++j) {
					c1 = j*N;
					mult = A[c1 + k] /= ld;
					for (l = k + 1; l < N; ++l) {
						A[c1 + l] -= mult * A[c2 + l];
					}
				}
			}
		}
	}
	else if (M < N) {
		/* Wide matrix: only M-1 elimination steps are possible. */
		for (k = 0; k < M-1; ++k) {
			mval = fabs(A[k*N + k]);
			mind = k;
			for (j = k + 1; j < M; ++j) {
				if (mval < fabs(A[j*N + k])) {
					mval = fabs(A[j*N + k]); /* fixed: keep the magnitude */
					mind = j;
				}
			}
			if (mind != k) {
				c1 = k *N;
				c2 = mind * N;
				tempi = ipiv[mind];
				ipiv[mind] = ipiv[k];
				ipiv[k] = tempi;
				for (j = 0; j < N; j++) {
					temp = A[c1 + j];
					*(A + c1 + j) = *(A + c2 + j);
					*(A + c2 + j) = temp;
				}
			}
			c2 = k*N;
			ld = A[c2 + k];
			if (ld != 0.) {
				for (j = k + 1; j < M; ++j) {
					c1 = j*N;
					mult = A[c1 + k] /= ld;
					for (l = k + 1; l < N; ++l) {
						A[c1 + l] -= mult * A[c2 + l];
					}
				}
			}
		}
	}
	else if (M == N) {
		pludecomp(A,N,ipiv);
	}
	return 0;
}
/* Unpack the in-place PLU factors produced by (r)ludecomp into explicit
 * P, L, U matrices. Any of P, L, U may be NULL to skip that output.
 * Expected shapes (row-major):
 *   P : M x M permutation matrix (built from ipiv)
 *   M > N : L is M x N, U is N x N
 *   M == N: L is M x M, U is M x M
 *   M < N : L is M x M, U is M x N
 */
void getPLU(double *A, int M , int N, int *ipiv,double *P, double *L, double *U) {
	int i, j,k;
	// Initialize all the arrays
	// P - M*M
	// M > N
	// L - M*N , U - N*N
	// M = N
	// L - M*M , U M*M
	// M < N
	// L - M*M, U - M*N
	if (P) {
		/* P has a single 1 per column: row ipiv[j] of the original
		 * matrix ends up as row j of the factored matrix. */
		for (i = 0; i < M*M; ++i) {
			P[i] = 0.0;
		}
		for (j = 0; j < M; ++j) {
			P[ipiv[j]*M + j] = 1.0;
		}
	}
	if (M == N) {
		if (L) {
			/* Strict lower triangle of A holds L; diagonal is implicit 1. */
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					L[i*M + j] = A[i*M + j];
				}
				L[i*M + i] = 1.0;
				for (j = i + 1; j < M; ++j) {
					L[i*M + j] = 0.0;
				}
			}
		}
		if (U) {
			/* Upper triangle (including diagonal) of A holds U. */
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					U[i*M + j] = 0.0;
				}
				for (j = i; j < M; ++j) {
					U[i*M + j] = A[i*M + j];
				}
			}
		}
	}
	else if (M > N) {
		if (L) {
			for (i = 0; i < N; ++i) {
				for (j = 0; j < i; ++j) {
					L[i*N + j] = A[i*N + j];
				}
				L[i*N + i] = 1.0;
				for (j = i + 1; j < N; ++j) {
					L[i*N + j] = 0.0;
				}
			}
			/* Rows N..M-1 of A are entirely multipliers: copy verbatim. */
			memcpy(L + N*N, A + N*N, sizeof(double)*(M - N)*N);
		}
		if (U) {
			for (i = 0; i < N; ++i) {
				for (j = 0; j < i; ++j) {
					U[i*N + j] = 0.0;
				}
				for (j = i; j < N; ++j) {
					U[i*N + j] = A[i*N + j];
				}
			}
		}
	}
	else if (M < N) {
		if (L) {
			/* Note L is M x M here while A rows have stride N. */
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					L[i*M + j] = A[i*N + j];
				}
				L[i*M + i] = 1.0;
				for (j = i + 1; j < M; ++j) {
					L[i*M + j] = 0.0;
				}
			}
		}
		if (U) {
			/* U is M x N: full rows of the upper trapezoid. */
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					U[i*N + j] = 0.0;
				}
				for (j = i; j < N; ++j) {
					U[i*N + j] = A[i*N + j];
				}
			}
		}
	}
}
/* From packed PLU factors, compute P' = inverse-permutation applied to L
 * (so A = P' * U up to the recorded pivoting) and unpack U.
 * P : M x K output (K = min(M,N)); U : unpacked upper factor.
 * A temporary explicit L is built internally, then its rows are permuted
 * by the inverse of ipiv into P.
 * NOTE(review): the `if (L)` checks below double as malloc-failure guards
 * on the internal buffer; the final memcpy loop still dereferences L, so
 * an allocation failure would not be fully handled — confirm intent. */
void getPU(double *A, int M, int N, int *ipiv, double *P,double *U) {
	int i,j,K;
	int *ipivt;
	double *L;
	ipivt = (int*)malloc(sizeof(int)*M);
	/* Invert the permutation: ipivt[orig_row] = factored_row. */
	for (i = 0; i < M; ++i) {
		ipivt[ipiv[i]] = i;
	}
	if (M > N) {
		K = N;
	}
	else {
		K = M;
	}
	L = (double*)malloc(sizeof(double)*M*K);
	if (M == N) {
		if (L) {
			/* Unit-lower L from the strict lower triangle of A. */
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					L[i*M + j] = A[i*M + j];
				}
				L[i*M + i] = 1.0;
				for (j = i + 1; j < M; ++j) {
					L[i*M + j] = 0.0;
				}
			}
		}
		if (U) {
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					U[i*M + j] = 0.0;
				}
				for (j = i; j < M; ++j) {
					U[i*M + j] = A[i*M + j];
				}
			}
		}
	}
	else if (M > N) {
		if (L) {
			for (i = 0; i < N; ++i) {
				for (j = 0; j < i; ++j) {
					L[i*N + j] = A[i*N + j];
				}
				L[i*N + i] = 1.0;
				for (j = i + 1; j < N; ++j) {
					L[i*N + j] = 0.0;
				}
			}
			/* Trailing rows of A are pure multipliers. */
			memcpy(L + N*N, A + N*N, sizeof(double)*(M - N)*N);
		}
		if (U) {
			for (i = 0; i < N; ++i) {
				for (j = 0; j < i; ++j) {
					U[i*N + j] = 0.0;
				}
				for (j = i; j < N; ++j) {
					U[i*N + j] = A[i*N + j];
				}
			}
		}
	}
	else if (M < N) {
		if (L) {
			/* L is M x M; A rows have stride N. */
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					L[i*M + j] = A[i*N + j];
				}
				L[i*M + i] = 1.0;
				for (j = i + 1; j < M; ++j) {
					L[i*M + j] = 0.0;
				}
			}
		}
		if (U) {
			for (i = 0; i < M; ++i) {
				for (j = 0; j < i; ++j) {
					U[i*N + j] = 0.0;
				}
				for (j = i; j < N; ++j) {
					U[i*N + j] = A[i*N + j];
				}
			}
		}
	}
	/* Undo the pivoting on L row-by-row into P. */
	for (i = 0; i < M; ++i) {
		memcpy(P + i*K, L + ipivt[i] * K, sizeof(double)*K);
	}
	free(ipivt);
	free(L);
}
/* Fill values[0..N-1] with Gaussian samples of the given mean (`average`)
 * and standard deviation, using the Marsaglia polar method. Pairs of
 * uniform samples on the unit disk yield two independent normals per
 * rejection-loop success. Uses rand(); the caller controls seeding.
 * Returns `values` for chaining. */
double* marsaglia_generate(double *values, int N, double average, double deviation)
{
	int i;
	int M;
	double x, y, rsq, f;
	/* M = N rounded up to even; M != N flags an odd-length tail below. */
	M = N + N % 2;
	for (i = 0; i < N - 1; i += 2)
	{
		/* Rejection sample (x, y) inside the unit circle, excluding 0. */
		do {
			x = 2.0 * rand() / (double)RAND_MAX - 1.0;
			y = 2.0 * rand() / (double)RAND_MAX - 1.0;
			rsq = x * x + y * y;
		} while (rsq >= 1. || rsq == 0.);
		f = sqrt(-2.0 * log(rsq) / rsq);
		values[i] = x * f;
		values[i + 1] = y * f;
	}
	if (M != N) {
		/* Odd N: generate one more pair and keep a single sample. */
		do {
			x = 2.0 * rand() / (double)RAND_MAX - 1.0;
			y = 2.0 * rand() / (double)RAND_MAX - 1.0;
			rsq = x * x + y * y;
		} while (rsq >= 1. || rsq == 0.);
		f = sqrt(-2.0 * log(rsq) / rsq);
		values[N - 1] = x * f;
	}
	/* Shift/scale the standard normals to the requested distribution. */
	for (i = 0; i < N; ++i) {
		values[i] = (values[i] * deviation + average);
	}
	return values;
}
/* Fill an M x N matrix with independent standard-normal entries. */
void random_matrix(double *A, int M, int N) {
	marsaglia_generate(A, M * N, 0.0, 1.0);
}
/* Solve A*x = b given the packed LU factors of A (from ludecomp) and the
 * pivot record ipiv. Two triangular solves:
 *   1. forward substitution with the unit-lower factor against P*b,
 *   2. back substitution with the upper factor.
 * Exits the process if a diagonal entry of U is exactly zero (singular). */
void linsolve(double *A,int N,double *b,int *ipiv,double *x) {
	int row, col;
	double *fwd;
	double acc;

	fwd = (double*) malloc(sizeof(double) * N);
	for (row = 0; row < N; ++row) {
		fwd[row] = 0.;
		x[row] = 0.;
		if (A[row * N + row] == 0.) {
			printf("The Matrix system does not have a unique solution");
			exit(1);
		}
	}
	/* Forward substitution: L*y = P*b (L has an implicit unit diagonal). */
	fwd[0] = b[ipiv[0]];
	for (row = 1; row < N; ++row) {
		acc = 0.;
		for (col = 0; col < row; ++col) {
			acc += fwd[col] * A[row * N + col];
		}
		fwd[row] = b[ipiv[row]] - acc;
	}
	/* Back substitution: U*x = y. */
	for (row = N - 1; row >= 0; --row) {
		acc = 0.;
		for (col = row + 1; col < N; ++col) {
			acc += A[row * N + col] * x[col];
		}
		x[row] = (fwd[row] - acc) / A[row * N + row];
	}
	free(fwd);
}
/* Compute the inverse of A from its packed LU factors (A, ipiv from
 * ludecomp) column by column: solving A * x = e_i gives column i of the
 * inverse, written into `inv` (N x N, row-major). */
void minverse(double *A,int N,int *ipiv,double *inv) {
	int i,j,stride;
	double *col,*x;
	col = (double*) malloc(sizeof(double) * N);
	x = (double*) malloc(sizeof(double) * N);
	for (i = 0; i < N; ++i) {
		col[i] = 0.;
		x[i] = 0.;
	}
	for (i = 0; i < N; ++i) {
		/* col is the i-th unit vector; reset below after the solve. */
		col[i] = 1.;
		linsolve(A,N,col,ipiv,x);
		/* Scatter the solution down column i of inv. */
		stride = i;
		for(j = 0; j < N;++j) {
			inv[stride] = x[j];
			stride+= N;
		}
		col[i] = 0.;
	}
	free(x);
	free(col);
}
/* Write the N x N identity matrix into mat (row-major). */
void eye(double *mat,int N) {
	int idx;
	for (idx = 0; idx < N * N; ++idx) {
		mat[idx] = 0.;
	}
	for (idx = 0; idx < N; ++idx) {
		mat[idx * N + idx] = 1.;
	}
}
/* Write lambda times the N x N identity into mat (row-major). */
void eye_scale(double *mat, int N, double lambda) {
	int idx;
	for (idx = 0; idx < N * N; ++idx) {
		mat[idx] = 0.;
	}
	for (idx = 0; idx < N; ++idx) {
		mat[idx * N + idx] = lambda;
	}
}
/* Householder vector computation (Golub & Van Loan style): given x of
 * length N, produce v (v[0] normalized to 1) and return beta such that
 * (I - beta*v*v') annihilates x below its first component. This variant
 * guards against cancellation when x[0] > 0 by using the
 * -sigma/(x[0]+mu) form. */
static double house_1(double*x,int N,double *v) {
	double beta,mu,temp;
	double *sigma;
	int i;
	/* sigma = x[1:]'x[1:], computed via the 1x1 matrix product. */
	sigma = (double*) malloc(sizeof(double) * 1);
	if (N > 1) {
		mmult(x+1,x+1,sigma,1,N-1,1);
	} else {
		sigma[0] = 0.0;
	}
	v[0] =1.;
	for (i = 1; i < N;++i) {
		v[i] = x[i];
	}
	if (sigma[0] == 0. && x[0] >= 0.) {
		/* x already lies on the positive axis: no reflection needed. */
		beta = 0.;
	} else if (sigma[0] == 0. && x[0] < 0.) {
		beta = -2.;
	}else {
		mu = sqrt(sigma[0] + x[0] * x[0]);   /* ||x||_2 */
		if (x[0] <= 0.) {
			v[0] = x[0] - mu;
		} else {
			/* Stable form avoiding subtraction of nearly equal numbers. */
			v[0] = - sigma[0] / (x[0] + mu);
		}
		temp = v[0];
		beta = (2.0 * v[0] * v[0]) /(sigma[0] + v[0] * v[0]);
		/* Normalize so v[0] == 1. */
		for (i = 0; i < N;++i) {
			v[i] /= temp;
		}
	}
	free(sigma);
	return beta;
}
/* Alternative Householder vector computation: v = x + sign(x[0])*||x||*e1,
 * normalized to unit 2-norm, with beta fixed at 2. Kept alongside
 * house_1; the public `house` wrapper uses house_1. */
double house_2(double*x,int N,double *v) {
	double sgn,beta,sc;
	double *sigma,*e;
	int i;
	sigma = (double*) malloc(sizeof(double) * 1);
	e = (double*) malloc(sizeof(double) * N);
	beta = 2.0;
	sgn = 1.0;
	/* sigma = ||x||_2 via the 1x1 product x'x. */
	mmult(x,x,sigma,1,N,1);
	sigma[0] = sqrt(sigma[0]);
	e[0] =1.;
	for (i = 1; i < N;++i) {
		e[i] = 0.;
	}
	/* sgn = sign(x[0]), with sign(0) = 0. */
	if (x[0] > 0.) {
		sgn = 1.0;
	} else if (x[0] < 0.) {
		sgn = -1.0;
	} else if (x[0] == 0.) {
		sgn = 0.;
	}
	sc = sigma[0] * sgn;
	//scale(e,N,1,sc);
	e[0] *= sc;
	for(i = 0; i < N;++i) {
		v[i] = e[i] + x[i];
	}
	/* Normalize v to unit length. */
	mmult(v,v,sigma,1,N,1);
	sigma[0] = sqrt(sigma[0]);
	for(i = 0; i < N;++i) {
		v[i] = v[i] / sigma[0];
	}
	free(sigma);
	free(e);
	return beta;
}
/* Public Householder-vector entry point; delegates to the cancellation-
 * safe variant house_1. Returns beta; v receives the reflector. */
double house(double*x,int N,double *v) {
	return house_1(x,N,v);
}
/* Build the explicit N x N Householder matrix mat = I - beta*v*v'
 * from reflector v and scalar beta. */
void housemat(double *v, int N,double beta,double *mat) {
	double *temp;
	temp = (double*) malloc(sizeof(double) * N * N);
	eye(mat,N);
	/* temp = v*v' (outer product), then scaled by beta. */
	mmult(v,v,temp,N,1,N);
	scale(temp,N,N,beta);
	msub(mat,temp,mat,N,N);
	free(temp);
}
/* Householder reduction of a real symmetric N x N matrix `a` (row-major)
 * to tridiagonal form, accumulating the orthogonal transform in `a`.
 * On return: d holds the diagonal, e the subdiagonal (e[0] = 0), and `a`
 * the transformation matrix whose columns feed tqli's eigenvector
 * accumulation. Adapted from Numerical Recipes tred2; statement order is
 * significant throughout. */
static void tred2(double *a, int N, double *d, double *e) {
	// Modified version of Numerical recipes tred2 alogorithm
	int l, k, j, i;
	double scale, hh, h, g, f;
	for (i = N - 1; i > 0; --i) {
		l = i - 1;
		h = scale = 0.0;
		if (l > 0) {
			/* Scale the row to avoid under/overflow in the norm. */
			for (k = 0; k <= l; ++k) {
				scale +=(double) fabs(a[i*N + k]);
			}
			if (scale == 0.0) {
				/* Row already zero below the diagonal: skip transform. */
				e[i] = a[i*N + l];
			}
			else {
				for (k = 0; k <= l; ++k) {
					a[i*N + k] /= scale;
					h += a[i*N + k] * a[i*N + k];
				}
				f = a[i*N + l];
				/* Choose sign to avoid cancellation. */
				g = (double) (f >= 0.0 ? -sqrt(h) : sqrt(h));
				e[i] = scale*g;
				h -= f*g;
				a[i*N + l] = f - g;
				f = 0.0;
				for (j = 0; j <= l; ++j) {
					a[j*N + i] = a[i*N + j] / h;
					g = 0.0;
					/* Form g = A*u using symmetric storage. */
					for (k = 0; k <= j; ++k) {
						g += a[j*N + k] * a[i*N + k];
					}
					for (k = j + 1; k <= l; ++k) {
						g += a[k*N + j] * a[i*N + k];
					}
					e[j] = g / h;
					f += e[j] * a[i*N + j];
				}
				hh = f / (h + h);
				/* Rank-two update of the trailing submatrix. */
				for (j = 0; j <= l; ++j) {
					f = a[i*N + j];
					e[j] = g = e[j] - hh*f;
					for (k = 0; k <= j; ++k) {
						a[j*N + k] -= (f*e[k] + g*a[i*N + k]);
					}
				}
			}
		}
		else {
			e[i] = a[i*N + l];
		}
		d[i] = h;
	}
	d[0] = 0.0;
	e[0] = 0.0;
	/* Second pass: accumulate the product of the Householder transforms. */
	for (i = 0; i < N; ++i) {
		l = i - 1;
		if (d[i]) {
			for (j = 0; j <= l; ++j) {
				g = 0.0;
				for (k = 0; k <= l; ++k) {
					g += a[i*N+k] * a[k*N+j];
				}
				for (k = 0; k <= l; ++k) {
					a[k*N + j] -= g*a[k*N+i];
				}
			}
		}
		d[i] = a[i*N+i];
		a[i*N+i] = 1.0;
		for (j = 0; j <= l; ++j) {
			a[j*N+i] = a[i*N+j] = 0.0;
		}
	}
}
static double pythag(double a, double b) {
double absa, absb,val;
absa = (double) fabs(a);
absb = (double) fabs(b);
if (absa > absb) {
val = (double) absa*sqrt(1.0 + (absb / absa)*(absb / absa));
return val;
}
else {
val = (double) (absb == 0.0 ? 0.0 : absb*sqrt(1.0 + (absa / absb)*(absa / absb)));
return val;
}
}
/* QL algorithm with implicit shifts for a symmetric tridiagonal matrix
 * (Numerical Recipes tqli). Inputs: d = diagonal, e = subdiagonal
 * (e[0] unused), z = transform accumulated by tred2. Outputs: d =
 * eigenvalues, columns of z = eigenvectors. Exceeding 30 iterations per
 * eigenvalue only prints a warning; it does not abort. */
static void tqli(double *d, int N, double *e, double *z) {
	int m, l, iter, i, k;
	double s, r, p, g, f, dd, c, b;
	/* Shift the subdiagonal down one slot for convenient indexing. */
	for (i = 1; i < N; ++i) {
		e[i - 1] = e[i];
	}
	e[N - 1] = 0;
	for (l = 0; l < N; ++l) {
		iter = 0;
		do {
			/* Find a small off-diagonal element to split the problem. */
			for (m = l; m < N - 1; ++m) {
				dd =(double) fabs(d[m]) + fabs(d[m + 1]);
				if ((double)(fabs(e[m]) + dd) == dd) {
					break;
				}
			}
			if (m != l) {
				if (iter++ == 30) {
					printf("Too many iterations in tqli");
				}
				/* Implicit shift from the trailing 2x2 block. */
				g = (d[l + 1] - d[l]) / (2.0*e[l]);
				r = pythag(g, 1.0);
				g = d[m] - d[l] + e[l] / (g + (double) SIGN(r, g));
				s = c = 1.0;
				p = 0.0;
				/* One QL sweep of plane rotations from m-1 down to l. */
				for (i = m - 1; i >= l; --i) {
					f = s*e[i];
					b = c*e[i];
					e[i + 1] = (r = pythag(f, g));
					if (r == 0.0) {
						/* Negligible rotation: recover and restart sweep. */
						d[i + 1] -= p;
						e[m] = 0.0;
						break;
					}
					s = f / r;
					c = g / r;
					g = d[i + 1] - p;
					r = (d[i] - g)*s + 2.0*c*b;
					d[i + 1] = g + (p = s*r);
					g = c*r - b;
					/* Apply the rotation to the eigenvector columns. */
					for (k = 0; k < N; ++k) {
						f = z[k*N + i + 1];
						z[k*N + i + 1] = s*z[k*N + i] + c*f;
						z[k*N + i] = c*z[k*N + i] - s*f;
					}
				}
				if (r == 0.0 && i >= l) continue;
				d[l] -= p;
				e[l] = g;
				e[m] = 0.0;
			}
		} while (m != l);
	}
}
/* Eigen-decomposition of a real symmetric N x N matrix `mat`.
 * eval receives the eigenvalues, evec the eigenvectors (by column).
 * mat itself is left untouched: a copy is reduced to tridiagonal form
 * (tred2) and then diagonalized (tqli). */
void eigensystem(double *mat, int N, double *eval, double *evec) {
	double *e;
	/* e: scratch subdiagonal passed between tred2 and tqli. */
	e = (double*)calloc(N, sizeof(double));
	memcpy(evec, mat, sizeof(double)*N*N);
	tred2(evec, N, eval, e);
	tqli(eval, N, e, evec);
	free(e);
}
/* Householder QR factorization of A (M x N, M >= N), in place:
 * on return the upper triangle of A holds R and the columns below the
 * diagonal hold the essential parts of the Householder vectors; bvec
 * (length N) holds the corresponding beta scalars. Use getQR to expand
 * Q and R explicitly. Exits the process if M < N. */
void qrdecomp(double *A, int M, int N,double *bvec) {
	int j,i,k,u,t;
	double *x,*v,*AT,*w;
	double beta;
	if (M < N) {
		printf("M should be greater than or equal to N");
		exit(1);
	}
	x = (double*) malloc(sizeof(double) * M);
	v = (double*) malloc(sizeof(double) * M);
	AT = (double*) malloc(sizeof(double) * M * N);
	w = (double*) malloc(sizeof(double) * M * M);
	for(j = 0; j < N;++j) {
		/* Reflector for column j, rows j..M-1. */
		for(i=j;i < M;++i) {
			x[i-j] = A[i*N+j];
		}
		beta = house(x,M-j,v);
		bvec[j] = beta;
		/* Gather the trailing submatrix A[j:,j:] transposed into AT. */
		for (i=j; i < M; i++) {
			t = i * N;
			u = 0;
			for (k=j; k < N; k++) {
				AT[u+i-j] = A[k+t];
				u+=(M-j);
			}
		}
		/* Apply (I - beta*v*v') to the trailing submatrix:
		 * w = beta * A'v ; AT = v*w' ; A -= AT. */
		mmult(AT,v,w,N-j,M-j,1);
		scale(w,N-j,1,beta);
		mmult(v,w,AT,M-j,1,N-j);
		for (i=j; i < M; i++) {
			t = i *N;
			for (k=j; k < N; k++) {
				A[t+k] -= AT[(i-j)*(N-j) + k - j];
			}
		}
		/* Store the essential part of v below the diagonal (v[0] == 1
		 * is implicit). Note: j < M always holds here since N <= M. */
		if (j < M) {
			for(i=j+1;i < M;++i) {
				A[i*N+j] = v[i-j];
			}
		}
	}
	free(x);
	free(v);
	free(AT);
	free(w);
}
/* Expand the packed qrdecomp output (A, bvec) into explicit Q and R.
 * R is read off the upper triangle; Q is accumulated by applying the
 * stored Householder reflectors to the identity in reverse order
 * (backward accumulation).
 * NOTE(review): Q and R are indexed with row stride N, i.e. both are
 * treated as N-column matrices — effectively the thin factorization;
 * `x` is allocated but never used. Confirm against callers. */
void getQR(double *A,int M,int N,double *bvec,double *Q, double *R) {
	int i,j,k,t,u;
	double *x,*v,*AT,*w;
	x = (double*) malloc(sizeof(double) * M);
	v = (double*) malloc(sizeof(double) * M);
	AT = (double*) malloc(sizeof(double) * M * N);
	w = (double*) malloc(sizeof(double) * M * M);
	/* R = upper triangle of packed A. */
	for(i = 0; i < N;++i) {
		t = i *N;
		for(j = 0; j < N;++j) {
			if (i > j) {
				R[t+j] = 0.;
			} else {
				R[t+j] = A[t+j];
			}
		}
	}
	/* Q starts as the identity. */
	for(i = 0; i < M;++i) {
		t = i *N;
		for(j = 0; j < N;++j) {
			if (i == j) {
				Q[t+j] = 1.;
			} else {
				Q[t+j] = 0.;
			}
		}
	}
	/* Apply reflectors H_{N-1} ... H_0 to Q from the left. */
	for(j = N-1; j >= 0;--j) {
		/* Rebuild v from the essential part stored below the diagonal. */
		v[0] = 1.;
		for(i=j+1;i < M;++i) {
			v[i-j] = A[i*N+j];
		}
		for (i=j; i < M; i++) {
			t = i * N;
			u = 0;
			for (k=j; k < N; k++) {
				AT[u+i-j] = Q[k+t];
				u+=(M-j);
			}
		}
		/* Q[j:,j:] -= beta * v * (v' Q[j:,j:]). */
		mmult(AT,v,w,N-j,M-j,1);
		scale(w,N-j,1,bvec[j]);
		mmult(v,w,AT,M-j,1,N-j);
		for (i=j; i < M; i++) {
			t = i *N;
			for (k=j; k < N; k++) {
				Q[t+k] -= AT[(i-j)*(N-j) + k - j];
			}
		}
	}
	free(x);
	free(v);
	free(AT);
	free(w);
}
/* Reduce a square N x N matrix A to upper Hessenberg form in place by
 * Householder similarity transforms: for each column k a reflector is
 * applied from the left (rows k+1..N-1) and from the right (columns
 * k+1..N-1), preserving eigenvalues. */
void hessenberg(double *A,int N) {
	int k,i,j,t,u;
	double *x,*v,*AT,*w;
	double beta;
	x = (double*) malloc(sizeof(double) * N);
	v = (double*) malloc(sizeof(double) * N);
	AT = (double*) malloc(sizeof(double) * N * N);
	w = (double*) malloc(sizeof(double) * N);
	for (k = 0; k < N-2;++k) {
		/* Reflector annihilating A[k+2.., k] below the subdiagonal. */
		for(i=k + 1;i < N;++i) {
			x[i-k-1] = A[i*N+k];
			//printf("x %lf \n",x[i-k-1]);
		}
		beta = house(x,N-k-1,v);
		/* Left update: gather A[k+1:, k:] transposed... */
		for (i=k+1; i < N; i++) {
			t = i * N;
			u = 0;
			for (j=k; j < N; j++) {
				AT[u+i-k-1] = A[j+t];
				u+=(N-k-1);
			}
		}
		//mdisplay(AT,N-k,N-k-1);
		/* ...then A[k+1:, k:] -= beta * v * (v' A[k+1:, k:]). */
		mmult(AT,v,w,N-k,N-k-1,1);
		scale(w,N-k,1,beta);
		mmult(v,w,AT,N-k-1,1,N-k);
		//mdisplay(AT,N-k-1,N-k);
		for (i=k+1; i < N; i++) {
			t = i * N;
			for (j=k; j < N; j++) {
				A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
			}
		}
		//mdisplay(A,N,N);
		/* Right update: A[:, k+1:] -= beta * (A[:, k+1:] v) * v'. */
		for (i=0; i < N; i++) {
			t = i * N;
			u = i * (N-k-1);
			for (j=k+1; j < N; j++) {
				AT[u+j-k-1] = A[t+j];
			}
		}
		//mdisplay(AT,N,N-k-1);
		mmult(AT,v,w,N,N-k-1,1);
		scale(w,N,1,beta);
		mmult(w,v,AT,N,1,N-k-1);
		//mdisplay(AT,N,N-k-1);
		for (i=0; i < N; i++) {
			t = i * N;
			u = i * (N-k-1);
			for (j=k+1; j < N; j++) {
				A[t+j] -= AT[u+j-k-1];
			}
		}
	}
	free(x);
	free(v);
	free(AT);
	free(w);
}
/* One Francis double-shift QR step on an upper Hessenberg matrix A
 * (N x N, in place). Algorithm 7.5.1, Golub & Van Loan, Matrix
 * Computations, 3rd ed.
 *
 * Fix: the `N <= 2` early return used to happen AFTER the four mallocs
 * (leaking all of them) and after reading A[N+N+1], which is out of
 * bounds for N == 2. The guard now runs before any allocation or read. */
void francisQR(double *A,int N) {
	int n,k,q,r,t,u,i,j;
	double s,t2,beta;
	double *x,*v,*AT,*w;
	int NN;
	if (N <= 2) {
		/* Nothing to do for 1x1 / 2x2 blocks. */
		return;
	}
	x = (double*) malloc(sizeof(double) * 3);
	v = (double*) malloc(sizeof(double) * 3);
	AT = (double*) malloc(sizeof(double) * 3 * N);
	w = (double*) malloc(sizeof(double) * N);
	n = N-1;
	NN = N*N;
	/* Double-shift parameters from the trailing 2x2 block:
	 * s = trace, t2 = determinant. */
	s = A[NN-1] + A[NN-N-2];
	t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1];
	/* First column of (A - s1*I)(A - s2*I), using the Hessenberg
	 * structure: only three entries are nonzero. */
	x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2;
	x[1] = A[N]*(A[0] + A[N+1] - s);
	x[2] = A[N] * A[N+N+1];
	/* Chase the bulge down the subdiagonal with 3x3 reflectors. */
	for (k = -1; k < N - 3;++k) {
		beta = house(x,3,v);
		if (k > 0) {
			q = k;
		} else {
			q = 0;
		}
		/* Left update on rows k+1..k+3, columns q..N-1. */
		for (i=k+1; i < k+4; i++) {
			t = i * N;
			u = 0;
			for (j=q; j < N; j++) {
				AT[u+i-k-1] = A[j+t];
				u+=3;
			}
		}
		mmult(AT,v,w,N-q,3,1);
		scale(w,N-q,1,beta);
		mmult(v,w,AT,3,1,N-q);
		for (i=k+1; i < k+4; i++) {
			t = i * N;
			for (j=q; j < N; j++) {
				A[t+j] -= AT[(i-k-1)*(N-q) + j - q];
			}
		}
		/* Right update on rows 0..r-1, columns k+1..k+3. */
		if (k+4 >= n) {
			r = N;
		} else {
			r = k+4+1;
		}
		for (i=0; i < r; i++) {
			t = i * N;
			u = i * 3;
			for (j=k+1; j < k+4; j++) {
				AT[u+j-k-1] = A[t+j];
			}
		}
		mmult(AT,v,w,r,3,1);
		scale(w,r,1,beta);
		mmult(w,v,AT,r,1,3);
		for (i=0; i < r; i++) {
			t = i * N;
			u = i * 3;
			for (j=k+1; j < k+4; j++) {
				A[t+j] -= AT[u+j-k-1];
			}
		}
		/* Next bulge column. */
		x[0] = A[N*(k+2) + k+1];
		x[1] = A[N*(k+3) + k+1];
		if (k < n-3) {
			x[2] = A[N*(k+4) + k+1];
		}
	}
	/* Final 2x2 reflector to finish the sweep. */
	beta = house(x,2,v);
	for (i=n-1; i < N; i++) {
		t = i * N;
		u = 0;
		for (j=n-2; j < N; j++) {
			AT[u+i-n+1] = A[j+t];
			u+=2;
		}
	}
	mmult(AT,v,w,3,2,1);
	scale(w,3,1,beta);
	mmult(v,w,AT,2,1,3);
	for (i=n-1; i < N; i++) {
		t = i * N;
		for (j=n-2; j < N; j++) {
			A[t+j] -= AT[(i-n+1)*3 + j - n + 2];
		}
	}
	for (i=0; i < N; i++) {
		t = i * N;
		u = i * 2;
		for (j=n-1; j < N; j++) {
			AT[u+j-n+1] = A[t+j];
		}
	}
	mmult(AT,v,w,N,2,1);
	scale(w,N,1,beta);
	mmult(w,v,AT,N,1,2);
	for (i=0; i < N; i++) {
		t = i * N;
		u = i * 2;
		for (j=n-1; j < N; j++) {
			A[t+j] -= AT[u+j-n+1];
		}
	}
	free(x);
	free(v);
	free(AT);
	free(w);
}
/* Eigenvalues of the 2x2 block at A (row stride `stride`), written to
 * eigre/eigim (length 2 each). A Jacobi-style rotation symmetrizes the
 * block; complex-conjugate pairs arise when at12*at21 < 0, otherwise the
 * eigenvalues are real. */
void eig22(double *A, int stride,double *eigre,double *eigim) {
	int N;
	double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
	N = stride;
	a11 = A[0];
	a12 = A[1];
	a21 = A[N];
	a22 = A[N+1];
	/* Rotation angle from tan(2*theta) relation; 45 degrees if a12+a21 == 0. */
	if ( (a12 + a21) == 0) {
		c = 1./sqrt(2.0);
		s = c;
	} else {
		t1 = (a11 - a22) / (a12 + a21);
		t = t1 /(1. + sqrt(1+t1*t1));
		c = 1./sqrt(1 + t*t);
		s = c*t;
	}
	c2 = c*c;
	s2 = s*s;
	cs = c*s;
	/* Similarity-transformed block: at11 == at22 by construction. */
	at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
	at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
	at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
	at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
	/* Complex pair: at11 +/- i*sqrt(-at12*at21). */
	eigre[0] = eigre[1] = at11;
	eigim[0] = sqrt(-at12 * at21);
	eigim[1] = -sqrt(-at12 * at21);
	if ( at12*at21 >= 0) {
		/* Real eigenvalues: a second rotation diagonalizes the block. */
		if (at12 == 0) {
			c = 0;
			s = 1;
			c2 = 0;
			s2 = 1;
			cs = 0;
		} else {
			t = sqrt(at21/at12);
			t2 = t * t;
			cs = t/(1+t2);
			/* NOTE(review): c2 = (1+t2) looks like it was meant to be
			 * 1/(1+t2); harmless here because only cs is read below,
			 * but confirm before reusing c2/s2. */
			c2 = (1+t2);
			s2 = t2 /(1+t2);
		}
		eigim[0] = eigim[1] = 0.0;
		eigre[0] = at11 - cs * (at12 + at21);
		eigre[1] = at11 + cs * (at12 + at21);
	}
}
/* Real Schur-form driver: copy A into H, reduce to Hessenberg form, then
 * repeatedly run Francis double-shift QR sweeps on the active (unreduced)
 * diagonal block until it deflates to 1x1 / 2x2 blocks.
 * Returns 1 on convergence, 0 if the 30*N iteration budget is exhausted. */
int francis_iter(double *A, int N, double *H) {
	int success,brkpoint;
	int i,j,it,p,q,t,u;
	double *temp;
	success = 0;
	brkpoint = 30 * N;      /* iteration budget */
	it = 0;
	p = N - 1;              /* bottom row index of the active block */
	temp = (double*) malloc(sizeof(double) * N * N);
	for(i = 0; i < N*N;++i) {
		H[i] = A[i];
	}
	hessenberg(H,N);
	while (p > 1 && it < brkpoint) {
		/* Deflate: shrink the active block past zero subdiagonals. */
		while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) {
			if (H[N*p + p-1] == 0) {
				p--;
			} else if (H[N*(p-1) + p-2] == 0) {
				p=p-2;
			}
		}
		if (p > 0) {
			/* Find the top of the unreduced block. */
			q = p-1;
			while (q > 0 && fabs(H[N*q + q-1]) != 0) {
				q--;
			}
			/* Copy the active block, sweep it, and write it back. */
			for (i=q; i <= p; i++) {
				t = i * N;
				u = (i-q) * (p-q+1);
				for (j=q; j <= p; j++) {
					temp[u+j-q] = H[t+j];
				}
			}
			francisQR(temp,p-q+1);
			for (i=q; i <= p; i++) {
				t = i * N;
				u = (i-q) * (p-q+1);
				for (j=q; j <= p; j++) {
					H[t+j] = temp[u+j-q];
				}
			}
			/* Zero out numerically negligible subdiagonal entries. */
			for(i = q; i <= p-1;++i) {
				if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) {
					H[(i+1)*N+i] = 0.;
				}
			}
			it++;
		}
	}
	if (it == brkpoint) {
		success = 0;
	} else {
		success = 1;
	}
	free(temp);
	return success;
}
static void eig2t(double *A, int stride) {
int N;
double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,at11,at12,at21,at22;
N = stride;
a11 = A[0];
a12 = A[1];
a21 = A[N];
a22 = A[N+1];
if ( (a12 + a21) == 0) {
c = 1./sqrt(2.0);
s = c;
} else {
t1 = (a11 - a22) / (a12 + a21);
t = t1 /(1. + sqrt(1+t1*t1));
c = 1./sqrt(1 + t*t);
s = c*t;
}
c2 = c*c;
s2 = s*s;
cs = c*s;
at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
A[0] = at11;
A[1] = at12;
A[N] = at21;
A[N+1] = at22;
}
/* All eigenvalues of a general real N x N matrix A. Runs the Francis QR
 * iteration to quasi-triangular (real Schur) form, then extracts
 * eigenvalues from the 1x1 and 2x2 diagonal blocks: real parts into
 * eigre, imaginary parts into eigim (length N each). A is not modified;
 * all work happens in a local copy H. */
void eig(double *A,int N,double *eigre,double *eigim) {
	int i,t,u,n;
	double *H;
	double t1,t2,cs;
	H = (double*) malloc(sizeof(double) * N * N);
	n = N - 1;
	francis_iter(A,N,H);
	/* First pass: symmetrize each 2x2 block so its diagonal entries agree. */
	i = 0;
	while (i < n) {
		u = i * N;
		t = (i+1)*N;
		if (H[t+i] != 0.) {
			eig2t(H+u+i,N);
			i = i +2;
		} else {
			i++;
		}
	}
	/* Second pass: read eigenvalues off the blocks. */
	i = 0;
	while (i < n) {
		u = i * N;
		t = (i+1)*N;
		if (H[t+i] != 0.) {
			if (H[u+i+1] * H[t+i] < 0.) {
				/* Opposite-sign off-diagonals: complex conjugate pair. */
				eigre[i] = H[u+i];
				eigre[i+1] = H[t+i+1];
				eigim[i] = sqrt(-H[u+i+1] * H[t+i]);
				eigim[i+1] = -sqrt(-H[u+i+1] * H[t+i]);
			} else {
				/* Same sign: two real eigenvalues from a final rotation. */
				if (H[u+i+1] == 0.) {
					cs = 0.;
				} else {
					t1 = sqrt(H[t+i]/H[u+i+1]);
					t2 = t1 * t1;
					cs = t1/(1+t2);
				}
				eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
				eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
				eigim[i] = 0.;
				eigim[i+1] = 0.;
			}
			i= i + 2;
		} else {
			/* 1x1 block: real eigenvalue on the diagonal. */
			eigre[i] = H[u+i];
			eigim[i] = 0.;
			i++;
		}
	}
	/* Trailing 1x1 block if the last pair did not consume row n. */
	if (i == n) {
		eigre[i] = H[N*N - 1];
		eigim[i] = 0.;
	}
	free(H);
}
/* Recursive upper Cholesky factorization of the leading N x N block of a
 * row-stride-`stride` matrix: A = U'U, U written in place (upper part).
 * U22 is caller-provided scratch of at least (N-1)^2 doubles.
 * Returns 0 on success, -1 if the matrix is not positive definite. */
static int rcholu(double *A,int N, int stride, double *U22) {
	int sc;
	int j,i,u,w;
	double u11;
	if (N == 1) {
		if (A[0] > 0) {
			A[0] = sqrt(A[0]);
			return 0;
		} else {
			return -1;
		}
	} else {
		if (A[0] < 0) {
			return -1;
		}
		/* First row of U: u11 = sqrt(a11); u12 = a12 / u11. */
		u11 = sqrt(A[0]);
		A[0] = u11;
		for (j = 1; j < N;++j) {
			A[j] /= u11;
		}
		/* Rank-1 downdate of the trailing block: A22 -= u12' u12
		 * (upper triangle only). */
		mmult(A+1,A+1,U22,N-1,1,N-1);
		for (i = 0; i < N-1; ++i) {
			u = stride + 1+ i * stride;
			w = i * (N-1);
			for(j = i; j < N-1;j++) {
				A[j + u] -= U22[j + w];
			}
		}
		/* Recurse on the (N-1) trailing block. */
		sc = rcholu(A+stride+1,N-1,stride,U22);
		if (sc == -1) {
			return -1;
		}
	}
	return sc;
}
/* Blocked recursive upper Cholesky: factor a BLOCKSIZE panel with rcholu,
 * solve the coupling block U12 by forward substitution, downdate the
 * trailing submatrix, and recurse. UB and UT are caller scratch buffers.
 * Returns 0 on success, -1 if not positive definite. */
static int rbcholu(double *A,int N, int stride, double *UB, double *UT) {
	int bs,bb,i,j,Nb,t,k,u,v,w,sc;
	double *b,*x,*U12,*U12T;
	double sum;
	bs = (int) BLOCKSIZE;
	bb = bs*bs;
	if (N <= BLOCKSIZE) {
		/* Small enough: plain recursion. */
		sc = rcholu(A,N,stride,UB);
		if (sc == -1) {
			return -1;
		}
	} else {
		Nb = N - bs;
		x = (double*) malloc(sizeof(double) * bs);
		b = (double*) malloc(sizeof(double) * bs);
		U12T = (double*) malloc(sizeof(double) * Nb * bs);
		U12 = (double*) malloc(sizeof(double) * Nb * bs);
		rcholu(A,bs,stride,UB); // U11
		/* UT = transpose of the first bs rows of A (i.e. [U11 U12-raw]'). */
		for (i =0; i < bs;++i) {
			t = i *stride;
			u = 0;
			for(j = 0; j < N;++j) {
				UT[u+i] = A[j+t];
				u += bs;
			}
		}
		/* Solve U11' * u12_col = a12_col for each of the Nb columns. */
		for(k = 0; k < Nb;++k) {
			u = k * bs;
			for(i = 0; i < bs;++i) {
				b[i] = UT[bb+u+i];
				x[i] = 0.;
			}
			/* Forward substitution against U11' (stored transposed in UT). */
			for (i = 0; i < bs;++i) {
				t = i*bs;
				sum = 0;
				for (j = 0; j < i;++j) {
					sum += UT[t+j] * x[j];
				}
				x[i] = (b[i] - sum) / UT[t+i];
			}
			/* Write the solved column back into A and keep it in U12T. */
			v = bs + k;
			for(i = 0; i < bs;++i) {
				A[v] = x[i];
				U12T[u+i] = x[i];
				v += stride;
			}
		}
		/* Trailing update: A22 -= U12' U12 (upper triangle only). */
		mtranspose(U12T,Nb,bs,U12);
		mmult(U12T,U12,UT,Nb,bs,Nb);
		free(U12T);
		free(U12);
		free(b);
		free(x);
		for (i = 0; i < Nb; ++i) {
			u = bs * stride + bs + i * stride;
			w = i * Nb;
			for(j = i; j < Nb;j++) {
				A[j + u] -= UT[j + w];
			}
		}
		/* Recurse on the trailing Nb x Nb block. */
		sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
		if (sc == -1) {
			return -1;
		}
	}
	return sc;
}
/* Upper Cholesky factorization A = U'U in place (A: N x N, row-major);
 * zeroes the strict lower triangle afterwards so A holds U exactly.
 * Returns 0 on success, -1 if A is not positive definite. */
int cholu(double *A, int N) {
	int stride,i,j,t,sc;
	double *U22;
	/* Scratch for the recursive rank-1 downdates. */
	U22 = (double*) malloc(sizeof(double) * N * N);
	stride = N;
	sc = rcholu(A,N,stride,U22);
	/* Clear below the diagonal (the recursion leaves stale values there). */
	for(i=0; i < N;++i) {
		t = i *N;
		for(j=0;j < i;++j) {
			A[t+j] = 0.;
		}
	}
	free(U22);
	return sc;
}
/* Blocked upper Cholesky A = U'U in place, for matrices larger than
 * BLOCKSIZE; zeroes the strict lower triangle afterwards.
 * Returns 0 on success, -1 if A is not positive definite. */
int bcholu(double *A, int N) {
	int stride,i,j,t,b,sc;
	double *UB,*UT;
	b = (int) BLOCKSIZE;
	/* UT: N*N scratch for trailing updates; UB: panel scratch. */
	UT = (double*) malloc(sizeof(double) * N * N);
	UB = (double*) malloc(sizeof(double) * b * b);
	stride = N;
	sc = rbcholu(A,N,stride,UB,UT);
	/* Clear below the diagonal. */
	for(i=0; i < N;++i) {
		t = i *N;
		for(j=0;j < i;++j) {
			A[t+j] = 0.;
		}
	}
	free(UB);
	free(UT);
	return sc;
}
/* Cholesky driver: pick the plain recursive kernel for small matrices,
 * the blocked one otherwise. Returns 0 on success, -1 if A is not
 * positive definite. */
int chol(double *A, int N) {
	if (N > (int) BLOCKSIZE) {
		return bcholu(A,N);
	}
	return cholu(A,N);
}
/* Recursive LDL'-style (square-root-free) factorization of the leading
 * N x N block with row stride `stride`: the diagonal entries hold D, the
 * rows above hold the unit-upper factor. U22 is (N-1)^2 scratch.
 * No positive-definiteness check (unlike rcholu). */
static void rchold(double *A,int N, int stride, double *U22) {
	int j,i,u,w;
	double d1;
	if (N == 1) {
		return;
	} else {
		d1 = A[0];
		/* First row of the unit-upper factor. */
		for (j = 1; j < N;++j) {
			A[j] /= d1;
		}
		/* Trailing downdate: A22 -= d1 * u12' u12 (upper triangle only). */
		mmult(A+1,A+1,U22,N-1,1,N-1);
		scale(U22,N-1,N-1,d1);
		for (i = 0; i < N-1; ++i) {
			u = stride + 1+ i * stride;
			w = i * (N-1);
			for(j = i; j < N-1;j++) {
				A[j + u] -= U22[j + w];
			}
		}
		rchold(A+stride+1,N-1,stride,U22);
	}
}
/* Square-root-free Cholesky (LDL'-style) of A in place: diagonal holds D,
 * upper triangle the unit-upper factor; strict lower triangle is zeroed. */
void chold(double *A, int N) {
	int stride,i,j,t;
	double *U22;
	U22 = (double*) malloc(sizeof(double) * N * N);
	stride = N;
	rchold(A,N,stride,U22);
	/* Clear below the diagonal. */
	for(i=0; i < N;++i) {
		t = i *N;
		for(j=0;j < i;++j) {
			A[t+j] = 0.;
		}
	}
	free(U22);
}
/* Reorder an SVD in place so the singular values q are sorted (order
 * determined by sort1d) and the columns of U (M x N) and V (N x N) are
 * permuted to match. */
void svd_sort(double *U,int M,int N,double *V,double *q) {
	/*
	 * Pavel Sakov's CSA SVD sort routine is used with some minor
	 * modifications. See The License below
	 */
	/*
	 * Copyright (C) 2000-2008 Pavel Sakov and CSIRO

	Redistribution and use of material from the package `csa', with or without
	modification, are permitted provided that the following conditions are
	met:

	1. Redistributions of material must retain the above copyright notice, this
	list of conditions and the following disclaimer.
	2. The names of the authors may not be used to endorse or promote products
	derived from this software without specific prior written permission.

	THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
	WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
	MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
	EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
	EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
	OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
	INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
	CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
	IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
	OF SUCH DAMAGE.
	*/
	int i,j;
	double *UT,*VT,*qq;
	int *pos;
	/* Snapshot U, V, q before permuting. */
	UT = (double*) malloc(sizeof(double) * N * M);
	VT = (double*) malloc(sizeof(double) * N * N);
	qq = (double*) malloc(sizeof(double) * N);
	pos = (int*) malloc(sizeof(int) * N);
	for(i = 0;i < N;++i) {
		qq[i] = q[i];
	}
	for(i = 0;i < M*N;++i) {
		UT[i] = U[i];
	}
	for(i = 0;i < N*N;++i) {
		VT[i] = V[i];
	}
	//mtranspose(U,M,N,UT);
	//mtranspose(V,N,N,VT);
	/* pos[i] = index of the i-th value in sorted order. */
	sort1d(q,N,pos);
	/* Gather columns according to the sort permutation. */
	for(i = 0; i < N;++i) {
		q[i] = qq[pos[i]];
		for (j = 0; j < M;++j) {
			U[j*N+i] = UT[j*N+pos[i]];
		}
		for (j = 0; j < N;++j) {
			V[j*N+i] = VT[j*N+pos[i]];
		}
	}
	free(UT);
	free(VT);
	free(qq);
	free(pos);
}
/* Singular value decomposition A = U * diag(q) * V' for M >= N.
 * Phases: (1) Householder bidiagonalization of a copy of A held in U,
 * (2) accumulation of the right (V) and left (U) transforms,
 * (3) implicit-shift QR iteration on the bidiagonal form,
 * (4) sign fix-up and sorting via svd_sort. */
int svd(double *A,int M,int N,double *U,double *V,double *q) {
	int i,j,k,l,t,t2,ierr,cancel,iter,l1;
	double eps,g,x,s,temp,f,h,c,y,z,scale;
	double *e;
	/*
	THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
	EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
	NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
	HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
	*/
	/*
	* U = MXN
	* V - NXN
	* Q - NX1
	*/
	/*
	* The program return error codes
	*
	* Code 0 if the computation is successful
	* Code -1 If M < N . Transpose the matrix such that rows > columns and try again
	* Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value
	* in matrix.h header file. Default Value is 50
	*
	*/
	if (M < N) {
		printf("Rows (M) should be greater than Columns (B) \n");
		printf("Retry By Transposing the Input Matrix");
		return -1;
	}
	/* e: superdiagonal of the bidiagonal form. */
	e = (double*) malloc(sizeof(double) * N);
	ierr = 0;
	eps = macheps();
	g = scale = x = 0.0;
	/* Work on a copy of A stored in U. */
	for(i = 0; i < M*N;++i) {
		U[i] = A[i];
	}
	/* Phase 1: Householder bidiagonalization. Column reflector (left),
	 * then row reflector (right), for each i. */
	for(i = 0; i < N;++i) {
		l = i+1;
		e[i] = scale * g;
		g = 0.0;
		s = 0.0;
		scale = 0.0;
		if (i < M) {
			/* Column i, rows i..M-1: scale, then build the reflector. */
			for(k = i; k < M;++k) {
				scale += fabs(U[k*N+i]);
			}
			if (scale != 0.0) {
				for(k = i; k < M;++k) {
					t = k * N;
					U[t+i] /= scale;
					temp = U[t+i];
					s += temp*temp;
				}
				f = U[i*N+i];
				g = (f < 0) ? sqrt(s) : -sqrt(s);
				h = f * g - s;
				U[i*N+i] = f - g;
				if (i < N - 1) {
					/* Apply the column reflector to the remaining columns. */
					for(j = l; j < N;++j) {
						s = 0.0;
						for(k = i; k < M;++k) {
							t = k * N;
							s += U[t+i]*U[t+j];
						}
						f = s / h;
						for(k = i; k < M;++k) {
							t = k * N;
							U[t+j] += f * U[t+i];
						}
					}
				}
				for(k = i; k < M;++k) {
					t = k * N;
					U[t+i] *= scale;
				}
			}
		}
		q[i] = scale * g;
		g = 0.0;
		s = 0.0;
		scale = 0.0;
		if (i < M && i != N - 1) {
			/* Row i, columns i+1..N-1: right-side reflector. */
			t = i *N;
			for(k = l; k < M;++k) {
				scale += fabs(U[t+k]);
			}
			if (scale != 0.0) {
				for(k = l; k < N;++k) {
					U[t+k] /= scale;
					temp = U[t+k];
					s = s + temp*temp;
				}
				f = U[t+l];
				g = (f < 0) ? sqrt(s) : -sqrt(s);
				h = f * g - s;
				U[t+l] = f - g;
				for(k = l;k < N;++k) {
					e[k] = U[t+k] / h;
				}
				for (j = l; j < M; j++) {
					s = 0.0;
					t2 = j * N;
					for (k = l; k < N; k++) {
						s += U[t2+k] * U[t+k];
					}
					for (k = l; k < N; k++) {
						U[t2+k] += s * e[k];
					}
				}
				for (k = l; k < N; k++)
					U[t+k] *= scale;
			}
		}
		/* Track the largest |q[i]| + |e[i]| for the convergence threshold. */
		temp = fabs(q[i]) + fabs(e[i]);
		if (x < temp) {
			x = temp;
		}
	}
	/*
	ierr = 0;
	eps = macheps();
	tol = eps;
	g = x = 0.0;
	for(i = 0; i < M*N;++i) {
		U[i] = A[i];
	}

	for(i = 0; i < N;++i) {
		l = i+1;
		e[i] = g;
		s = 0.0;

		for(k = i; k < M;++k) {
			t = k * N;
			temp = U[t+i];
			s += temp*temp;
		}

		if (s < tol) {
			g = 0.0;
		} else {
			f = U[i*N+i];
			g = (f < 0) ? sqrt(s) : -sqrt(s);
			h = f * g - s;
			U[i*N+i] = f - g;

			for(j = l; j < N;++j) {
				s = 0.0;
				for(k = i; k < M;++k) {
					t = k * N;
					s += (U[t+i]*U[t+j]);
				}
				f = s / h;

				for(k = i; k < M;++k) {
					t = k * N;
					U[t+j] += (f * U[t+i]);
				}
			}

		}
		q[i] = g;
		s = 0.0;
		t = i * N;

		for(k = l; k < N;++k) {
			temp = U[t+k];
			s = s + temp*temp;
		}

		if (s < tol) {
			g = 0.0;
		} else {
			f = U[t+l];
			g = (f < 0) ? sqrt(s) : -sqrt(s);
			h = f * g - s;
			U[t+l] = f - g;

			for(k = l;k < N;++k) {
				e[k] = U[t+k] / h;
			}

			for (j = l; j < M; j++) {
				s = 0.0;
				t2 = j * N;
				for (k = l; k < N; k++) {
					s += U[t2+k] * U[t+k];
				}
				for (k = l; k < N; k++) {
					U[t2+k] += s * e[k];
				}
			}

		}
		temp = fabs(q[i]) + fabs(e[i]);
		if (x < temp) {
			x = temp;
		}

	}
	*/
	//Accumulating Right Hand Transformations
	for(i = N - 1;i >= 0;--i) {
		t = i * N;
		if (i < N - 1) {
			if (g != 0.0) {
				h = U[t+i+1] * g;
				for(j = l;j < N;++j) {
					V[j*N+i] = U[t+j] / h;
				}
				for(j = l;j < N;++j) {
					s = 0.0;
					for(k = l; k < N;++k) {
						s += U[t+k] * V[k*N+j];
					}
					for(k = l; k < N;++k) {
						V[k*N+j] += (s * V[k*N+i]);
					}
				}
			}
			for(j = l; j < N;++j) {
				V[t+j] = V[j*N+i] = 0.0;
			}
		}
		V[t+i] = 1.0;
		g = e[i];
		l = i;
	}
	//Accumulating Left Hand Transformations
	for(i = N - 1;i >= 0;--i) {
		t = i * N;
		l = i+1;
		g = q[i];
		if (i < N - 1) {
			for(j = l;j < N;++j) {
				U[t+j] = 0.0;
			}
		}
		if (g != 0.0) {
			if (i != N - 1) {
				//h = U[t+i] * g;
				for(j = l;j < N;++j) {
					s = 0.0;
					for(k = l; k < M;++k) {
						s += (U[k*N+i] * U[k*N+j]);
					}
					f = (s / U[t+i]) / g;
					for(k = i; k < M;++k) {
						U[k*N+j] += (f * U[k*N+i]);
					}
				}
			}
			for(j = i; j < M;++j) {
				U[j*N+i] = U[j*N+i] / g;
			}
		} else {
			for(j = i; j < M;++j) {
				U[j*N+i] = 0.0;
			}
		}
		U[t+i] += 1.0;
	}
	// mdisplay(U,M,N);
	/* Phase 3: QR iteration on the bidiagonal matrix (d = q, e = super-
	 * diagonal); eps*x is the absolute convergence threshold. */
	eps = eps * x;
	for(k = N - 1; k >= 0; --k) {
		iter = 0;
		while(1) {
			iter++;
			if (iter > SVDMAXITER) {
				printf("Convergence Not Achieved \n");
				return 15;
			}
			/* Test for splitting: e[0] is always 0, so l >= 0 is safe. */
			cancel = 1;
			for(l = k; l >= 0; --l) {
				if (fabs(e[l]) <= eps) {
					cancel = 0; //test f convergence
					break;
				}
				if (fabs(q[l-1]) <= eps) {
					//Cancel
					break;
				}
			}
			if (cancel) {
				/* Cancellation of e[l] when q[l-1] is negligible. */
				c = 0.0;
				s = 1.0;
				l1 = l - 1;
				for(i = l; i <= k;++i) {
					f = s*e[i];
					e[i] *= c;
					if (fabs(f) <= eps) {
						break;
					}
					g = q[i];
					h = q[i] = hypot(f,g);
					c = g/h;
					s = -f/h;
					for(j = 0; j < M;++j) {
						t = j * N;
						y = U[t+l1];
						z = U[t+i];
						U[t+l1] = y * c + z * s;
						U[t+i] = z * c - y * s;
					}
				}
			}
			z = q[k];
			if (l != k) {
				/* Shift computed from the trailing 2x2; one QR sweep. */
				x = q[l];
				y = q[k-1];
				g = e[k-1];
				h = e[k];
				f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
				g = hypot(f,1.0);
				if (f < 0.0) {
					temp = f - g;
				} else {
					temp = f+g;
				}
				f = x - (z / x) * z + (h / x) * (y / temp - h);
				//Next QR Transformation
				c = s = 1.0;
				for(i = l+1; i <= k;++i) {
					g = e[i];
					y = q[i];
					h = s * g;
					g = c * g;
					e[i-1] = z = hypot(f,h);
					c = f / z;
					s = h / z;
					f = x * c + g * s;
					g = g * c - x * s;
					h = y * s;
					y *= c;
					/* Rotate V columns i-1, i. */
					for(j = 0; j < N;++j) {
						t = j * N;
						x = V[t+i-1];
						z = V[t+i];
						V[t+i-1] = x * c + z * s;
						V[t+i] = z * c - x * s;
					}
					q[i-1] = z = hypot(f,h);
					if (z != 0.0) {
						c = f / z;
						s = h / z;
					}
					f = c * g + s * y;
					x = c * y - s * g;
					/* Rotate U columns i-1, i. */
					for(j = 0; j < M;++j) {
						t = j * N;
						y = U[t+i-1];
						z = U[t+i];
						U[t+i-1] = y * c + z * s;
						U[t+i] = z * c - y * s;
					}
				}
				e[l] = 0.0;
				e[k] = f;
				q[k] = x;
			} else {
				//convergence
				/* Make the singular value non-negative by flipping V. */
				if (z < 0.0) {
					q[k] = -z;
					for (j = 0; j < N; j++) {
						t = j *N;
						V[t+k] = -V[t+k];
					}
				}
				break;
			}
		}
	}
	/* Phase 4: sort singular values and permute U, V to match. */
	svd_sort(U,M,N,V,q);
	free(e);
	return ierr;
}
/* svd_transpose: SVD front-end for wide matrices (M < N).
 * Per the original contract: U is MxM, V is NxM, q is Mx1, and the
 * factorization satisfies A = (V * diag(q) * U')'.
 * The input is transposed into V (an N x M tall matrix) and factored
 * in place with the core svd() routine.  Returns svd()'s status code;
 * exits with an error message if called with M >= N. */
int svd_transpose(double *A, int M, int N, double *U, double *V, double *q) {
    if (M >= N) {
        printf("M>=N. Use svd routine.\n");
        exit(-1);
    }
    /* Stage A' in V, then factor it in place (V doubles as input and
     * left-singular-vector output of the underlying call). */
    mtranspose(A, M, N, V);
    return svd(V, N, M, V, U, q);
}
/* rank_c: numerical rank of the M x N matrix A.
 * The rank is the number of singular values exceeding the tolerance
 * eps * max(M,N) * sigma_max (the standard SVD-based rank criterion).
 * Expects a tall/square input; the public rank() wrapper transposes
 * wide matrices before calling this.  Returns -1 if the SVD fails. */
static int rank_c(double *A, int M,int N) {
    int i,rnk,ret;
    double eps,tol,szmax,qmax;
    double *U,*V,*q;
    U = (double*) malloc(sizeof(double) * M*N);
    V = (double*) malloc(sizeof(double) * N*N);
    q = (double*) malloc(sizeof(double) * N);
    eps = macheps();
    rnk = 0;
    /* szmax = max(M, N); scales the rank tolerance below. */
    if (M < N) {
        szmax = (double) N;
    } else {
        szmax = (double) M;
    }
    ret = svd(A,M,N,U,V,q);
    if ( ret != 0) {
        printf("Failed to Compute SVD");
        /* Fix: release the workspace before bailing out (previously
         * U, V and q were leaked on this path). */
        free(U);
        free(V);
        free(q);
        return -1;
    }
    /* Fix: read sigma_max only after svd() succeeded; q may be
     * uninitialized when the SVD did not converge. */
    qmax = q[0];
    tol = qmax*szmax *eps;
    for(i = 0; i < N;++i) {
        if (q[i] > tol) {
            rnk++;
        }
    }
    free(U);
    free(V);
    free(q);
    return rnk;
}
/* rank: numerical rank of the M x N matrix A.
 * Wide matrices (M < N) are transposed into a scratch buffer first,
 * since the SVD core expects at least as many rows as columns.
 * Returns the rank, or -1 if the underlying SVD fails.
 * Fix: the transpose scratch buffer is now allocated only on the
 * M < N path instead of unconditionally (it was malloc'ed and freed
 * unused for tall/square inputs). */
int rank(double *A, int M,int N) {
    int rnk;
    if (M < N) {
        double *AT = (double*) malloc(sizeof(double) * M*N);
        mtranspose(A,M,N,AT);
        rnk = rank_c(AT,N,M);
        free(AT);
    } else {
        rnk = rank_c(A,M,N);
    }
    return rnk;
}
/* rsvd: randomized truncated SVD of a tall M x N matrix, producing a
 * rank-K approximation A ~= U * diag(S) * V'.  Only the M >= N case is
 * implemented; the wide case exits with an error. */
void rsvd(double *A, int M, int N,int K, int oversample, int n_iter,double *U, double *V, double *S) {
/*
A - MXN matrix
K - Rank of approximation being constructed. K <= min(m,n). default value 6
L - block size of the normalized power iterations. Default K+2
n_iter - number of normalized power iterations to conduct. Default 2
U - MXK
V - NXK
S - Diagonal Matrix KXK
*/
if (n_iter == 0) {
printf("Number of power iterations must be >= 1 or set it to < 0 if you want to use default value #RSVD_POWER_ITERATIONS \n");
exit(-1);
}
else if (n_iter < 0) {
/* Negative n_iter selects the compiled-in default iteration count. */
n_iter = (int)RSVD_POWER_ITERATIONS;
}
int i, j,maxdim,L;
double *Q1,*Q2,*AT,*R,*bvec,*uq,*vq,*sq;
int *ipiv;
/* Sample L = K + oversample columns; the extra columns improve accuracy. */
L = K + oversample;
maxdim = M > N ? M : N;
/* NOTE(review): seeding from the clock makes results time-dependent;
presumably intentional for a randomized method, but confirm callers do
not expect reproducible output. */
srand(time(NULL));
if (M >= N) {
/* Workspace: Q1/Q2 are maxdim x L panels reused throughout. */
Q1 = (double*)malloc(sizeof(double)*maxdim*L);
Q2 = (double*)malloc(sizeof(double)*maxdim*L);
AT = (double*)malloc(sizeof(double)*M*N);
ipiv = (int*)malloc(sizeof(int)*maxdim);
R = (double*)malloc(sizeof(double)*L*L);
bvec = (double*)malloc(sizeof(double)*L*L);
uq = (double*)malloc(sizeof(double)*L*L);
vq = (double*)malloc(sizeof(double)*L*N);
sq = (double*)malloc(sizeof(double)*L);
random_matrix(Q1, N, L);// N*L
mtranspose(A, M, N, AT);
/* Normalized power iterations: alternate products with A and A',
normalizing the panel via LU (getPU) to keep it well conditioned. */
for (i = 0; i < n_iter; ++i) {
mmult(A, Q1, Q2, M, N, L);// M*L
rludecomp(Q2, M, L, ipiv);
getPU(Q2, M, L, ipiv, Q1, NULL);// M*L
mmult(AT, Q1, Q2, N, M, L);//N*L
rludecomp(Q2, N, L, ipiv);
getPU(Q2, N, L, ipiv, Q1, NULL);// N*L
}
/* Build an orthonormal basis Q1 for the range of A via QR of A*Q1. */
mmult(A, Q1, Q2, M, N, L);
qrdecomp(Q2, M, L, bvec);
getQR(Q2, M, L, bvec, Q1, R);// M*L
mtranspose(Q1, M, L, Q2);// L*M
/* Project A onto the basis and take the small SVD of the L x N panel. */
mmult(Q2, A, Q1, L, M, N);// L* N
svd_transpose(Q1, L, N, uq, vq, sq);
// uq - LXL
// vq - NXL
//itranspose(vq, N, L);
/* Keep only the leading K of the L computed singular triplets. */
for (i = 0; i < N; ++i) {
for (j = 0; j < K; ++j) {
V[i*K + j] = vq[i*L + j];
}
}
memcpy(S, sq, sizeof(double)*K);
/* Map the small left factor back to the full space: U = Q * uq. */
itranspose(Q2, L, M);
mmult(Q2, uq, Q1, M, L, L);
for (i = 0; i < M; ++i) {
for (j = 0; j < K; ++j) {
U[i*K + j] = Q1[i*L + j];
}
}
free(Q1);
free(Q2);
free(ipiv);
free(AT);
free(R);
free(bvec);
free(uq);
free(vq);
free(sq);
}
else {
printf("Randomized SVD is only implemented for tall matrices (rows > columns)");
exit(-1);
}
}
|
GB_unaryop__ainv_bool_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_int16
// op(A') function: GB_tran__ainv_bool_int16
// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
// A's scalar type (the kernel input).
#define GB_ATYPE \
int16_t
// C's scalar type (the kernel output).
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// Accessor for the output array.
#define GB_CX(p) Cx [p]
// unary operator (AINV; here the generated op is the value itself)
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the operator elementwise: Cx [p] = (bool) Ax [p] for p in [0, anz),
// parallelized statically over nthreads OpenMP threads.
GrB_Info GB_unop__ainv_bool_int16
(
bool *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// This kernel was compiled out; the caller falls back to the generic path.
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose algorithm lives in the shared template
// GB_unaryop_transpose.c, instantiated here with this file's GB_* macros.
GrB_Info GB_tran__ainv_bool_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
primes.c | /*
* primes.c: Example of prime numbers counting in OpenMP.
*
* (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
*/
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
/* Inclusive interval [a, b] scanned for primes by both runners below. */
const int a = 1;
const int b = 10000000;
/* wtime: wall-clock time in seconds since the epoch, with microsecond
 * resolution, built from gettimeofday(). */
double wtime()
{
    struct timeval now;

    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
/*
* is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
* This function uses trial division primality test.
*/
/*
 * is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
 * This function uses trial division primality test.
 *
 * Fixes vs. the original: the divisor bound is computed in integer
 * arithmetic ((long long)i * i <= n) instead of sqrt(n) + 1, which
 * removes any float-rounding risk and the undefined behavior of
 * converting sqrt(negative) to int; n < 2 (including negatives) now
 * returns 0 explicitly.
 */
int is_prime_number(int n)
{
    if (n < 2)
        return 0;
    /* Trial-divide by 2, 3, ... while the divisor squared fits below n;
     * 64-bit product avoids overflow for n near INT_MAX. */
    for (int i = 2; (long long)i * i <= (long long)n; i++) {
        if (n % i == 0)
            return 0;
    }
    return 1;
}
/* count_prime_numbers: number of primes in the inclusive range [a, b].
 * Handles 2 explicitly, then tests only the odd candidates. */
int count_prime_numbers(int a, int b)
{
    int total = 0;

    /* 2 is the only even prime: count it and start scanning at 3. */
    if (a <= 2) {
        total = 1;
        a = 2;
    }
    /* Move the lower bound onto an odd number. */
    if (a % 2 == 0)
        a++;
    /* Scan the odd candidates a, a + 2, ..., b. */
    for (int candidate = a; candidate <= b; candidate += 2)
        total += is_prime_number(candidate);
    return total;
}
/* count_prime_numbers_omp: parallel version of count_prime_numbers with the
 * same contract — number of primes in [a, b] — using an OpenMP team. */
int count_prime_numbers_omp(int a, int b)
{
int nprimes = 0;
/* Count '2' as a prime number */
if (a <= 2) {
nprimes = 1;
a = 2;
}
/* Shift 'a' to odd number */
if (a % 2 == 0)
a++;
#pragma omp parallel
{
/* Each thread accumulates into its own private counter... */
int nloc = 0;
/* Loop over odd numbers: a, a + 2, a + 4, ... , b */
#pragma omp for
for (int i = a; i <= b; i += 2) {
if (is_prime_number(i))
nloc++;
}
/* ...and publishes it once; the atomic avoids a race on nprimes. */
#pragma omp atomic
nprimes += nloc;
}
return nprimes;
}
/* run_serial: time the sequential prime count over [a, b].  Prints the
 * count and returns the elapsed wall time in seconds. */
double run_serial()
{
    const double start = wtime();
    const int nprimes = count_prime_numbers(a, b);
    const double elapsed = wtime() - start;

    printf("Result (serial): %d\n", nprimes);
    return elapsed;
}
/* run_parallel: time the OpenMP prime count over [a, b].  Prints the
 * count and returns the elapsed wall time in seconds. */
double run_parallel()
{
    const double start = wtime();
    const int nprimes = count_prime_numbers_omp(a, b);
    const double elapsed = wtime() - start;

    printf("Result (parallel): %d\n", nprimes);
    return elapsed;
}
/* Entry point: run the serial and parallel counters over the same
 * interval and report both timings plus the resulting speedup. */
int main(int argc, char **argv)
{
    printf("Count prime numbers on [%d, %d]\n", a, b);

    const double t_seq = run_serial();
    const double t_par = run_parallel();

    printf("Execution time (serial): %.6f\n", t_seq);
    printf("Execution time (parallel): %.6f\n", t_par);
    printf("Speedup: %.2f\n", t_seq / t_par);
    return 0;
}
|
api.c | // RUN: %libomp-compile-and-run
// RUN: %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
// Two-level stringification so STR(x) expands macro arguments before quoting.
#define XSTR(x) #x
#define STR(x) XSTR(x)
// True when the two C strings compare equal.
#define streqls(s1, s2) (!strcmp(s1, s2))
// Fail fast: report file, line, and the failing expression, then exit(1).
#define check(condition) \
if (!(condition)) { \
fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \
__LINE__); \
exit(1); \
}
#define BUFFER_SIZE 1024
/* Smoke test for the OpenMP 5.0 affinity-format API: sets a fixed format,
 * reads it back (full and truncated) with omp_get_affinity_format and
 * omp_capture_affinity, then lets 4 threads print via omp_display_affinity
 * for the CHECK line at the bottom of the file. */
int main(int argc, char** argv) {
    char buf[BUFFER_SIZE];
    size_t needed;
    omp_set_affinity_format("0123456789");
    /* needed reports the full formatted length regardless of buffer size. */
    needed = omp_get_affinity_format(buf, BUFFER_SIZE);
    check(streqls(buf, "0123456789"));
    check(needed == 10);  /* fix: statement was missing its terminating ';' */
    // Check that it is truncated properly
    omp_get_affinity_format(buf, 5);
    check(streqls(buf, "0123"));
    #pragma omp parallel
    {
        char my_buf[512];
        size_t needed = omp_capture_affinity(my_buf, 512, NULL);
        check(streqls(my_buf, "0123456789"));
        check(needed == 10);
        // Check that it is truncated properly
        omp_capture_affinity(my_buf, 5, NULL);
        check(streqls(my_buf, "0123"));
    }
    #pragma omp parallel num_threads(4)
    {
        omp_display_affinity(NULL);
    }
    return 0;
}
// CHECK: num_threads=4 0123456789
|
omp_nested_loop2.c | #include <stdio.h>
#include <omp.h>
/* Demonstrates nested OpenMP parallelism: an outer parallel-for over i and
 * an inner parallel-for over j, printing which thread runs each (i, j)
 * pair.  Output order is nondeterministic. */
int main()
{
int i, j;
omp_set_num_threads(4);
/* NOTE(review): omp_set_nested/omp_get_nested are deprecated as of
 * OpenMP 5.0 in favor of omp_set_max_active_levels — confirm the target
 * toolchain before modernizing. */
omp_set_nested(1);
printf("OMP_NESTED=%d\n", omp_get_nested());
#pragma omp parallel for
for (i=9; i>6; i--) {
#pragma omp parallel for
for (j=0; j<5; j++) {
printf("[%d] (i,j=%d,%d)\n", omp_get_thread_num(), i, j);
}
}
return 0;
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/token.h"
#include "MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
/* Default chromaticities: sRGB primaries and white point. */
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
(void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
exception);
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image->transparent_color,exception);
GetTimerInfo(&image->timer);
image->cache=AcquirePixelCache(0);
image->channel_mask=DefaultChannels;
image->channel_map=AcquirePixelChannelMap();
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=GetMagickTime();
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AcquireSemaphoreInfo();
image->signature=MagickCoreSignature;
/* With no ImageInfo supplied, return the image with defaults only. */
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
if (image_info->size != (char *) NULL)
{
/* "size" sets the canvas geometry; an x value becomes the scene offset. */
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
/* Only an extract geometry with an explicit offset takes effect here. */
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
/* Density "XxY": rho is x; sigma, when present, overrides y. */
flags=ParseGeometry(image_info->density,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->matte_color=image_info->matte_color;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
/*
Set all global options that map to per-image settings.
*/
(void) SyncImageSettings(image_info,image,exception);
/*
Global options that are only set for new images.
*/
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
/* ">" form: only lower an existing larger delay. */
if ((double) image->delay > floor(geometry_info.rho+0.5))
image->delay=(size_t) CastDoubleToLong(floor(
geometry_info.rho+0.5));
}
else
if ((flags & LessValue) != 0)
{
/* NOTE(review): this "<" branch updates ticks_per_second from sigma
rather than raising image->delay — confirm this asymmetry is intended. */
if ((double) image->delay < floor(geometry_info.rho+0.5))
image->ticks_per_second=CastDoubleToLong(floor(
geometry_info.sigma+0.5));
}
else
image->delay=(size_t) CastDoubleToLong(floor(geometry_info.rho+0.5));
if ((flags & SigmaValue) != 0)
image->ticks_per_second=CastDoubleToLong(floor(
geometry_info.sigma+0.5));
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  /* Allocate a new ImageInfo and reset it to default settings. */
  ImageInfo *info=(ImageInfo *) AcquireCriticalMemory(sizeof(*info));
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info,exception);
/* On allocation failure image->next stays NULL, as documented above. */
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MagickPathExtent);
/* Prefer the ImageInfo filename when one was provided. */
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MagickPathExtent);
/* Share this image's blob with the new frame and link it into the list. */
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting effects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
ImageType
image_type;
MagickBooleanType
homogeneous_colorspace,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
alpha_trait=images->alpha_trait;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
image_type=images->type;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
/* First pass: walk the list to size the canvas and merge traits. */
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->type != images->type)
image_type=UndefinedType;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
/* Stacked (top-to-bottom): widest column wins, heights accumulate. */
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
/* Side-by-side (left-to-right): widths accumulate, tallest row wins. */
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (image_type != BilevelType)
{
if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
{
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace,exception);
}
append_image->depth=depth;
append_image->alpha_trait=alpha_trait;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
/* Second pass: copy each image's pixels into place on the canvas. */
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/* A failure on any row cancels the remaining rows cheaply. */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(next,&pixel);
for (x=0; x < (ssize_t) next->columns; x++)
{
/* PixelInfo round-trip converts between the two pixel layouts. */
GetPixelInfoPixel(next,p,&pixel);
SetPixelViaPixelInfo(append_image,&pixel,q);
p+=GetPixelChannels(next);
q+=GetPixelChannels(append_image);
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
/* Advance the paste origin for the next image in the sequence. */
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* NOTE(review): beyond the validation/logging above, image is not read
here; the severity comes from a freshly acquired ExceptionInfo after
CatchException() reports it — confirm this matches the legacy contract. */
exception=AcquireExceptionInfo();
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  /* Convenience wrapper: clip to the first embedded path, keeping the
     inside of the path. */
  const char *default_pathname="#1";
  return(ClipImagePath(image,default_pathname,MagickTrue,exception));
}
/* ClipImagePath(): set the image's write mask from the named 8BIM clipping
   path stored in the image's properties.  When inside is true the mask is
   negated so later operations apply inside the path.  Returns MagickTrue on
   success, MagickFalse otherwise.
   Fix vs. the original: clip_mask is now destroyed when
   SetImageStorageClass() fails (it was leaked on that early return). */
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
/* Look up the named 8BIM clipping-path property stored on the image. */
property=AcquireString(pathname);
(void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property,exception);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
/* Render the vector path blob into a raster mask image. */
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,
MagickPathExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask,exception);
if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
{
clip_mask=DestroyImage(clip_mask);
return(MagickFalse);
}
}
/* Inside clipping keeps the path interior, so invert the mask. */
if (inside != MagickFalse)
(void) NegateImage(clip_mask,MagickFalse,exception);
(void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageMask(image,WritePixelMask,clip_mask,exception);
image->mask_trait=UpdatePixelTrait;
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  double
    scale;

  size_t
    length;

  /*
    Clone the image.  Refuse degenerate source geometry up front.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Allocate and zero the clone, then duplicate the scalar members and the
    deep-copied ancillary structures (image info, profiles, properties,
    artifacts, channel map).
  */
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->channels=image->channels;
  clone_image->mask_trait=image->mask_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->extent=image->extent;
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  /*
    detach != MagickFalse yields a clone with no link to the parent's list
    neighbors or I/O blob; otherwise the blob is shared by reference.
  */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap (one sentinel entry extra).
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy requested: share the pixel cache by reference and return.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    New dimensions requested: scale the page geometry and tile offsets
    proportionally; the pixel data of the clone is undefined until the
    caller initializes it (see the method description above).
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) CastDoubleToLong(floor(scale*
    image->page.width+0.5));
  clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5));
  clone_image->tile_offset.x=CastDoubleToLong(ceil(scale*
    image->tile_offset.x-0.5));
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) CastDoubleToLong(floor(scale*
    image->page.height+0.5));
  clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5));
  clone_image->tile_offset.y=CastDoubleToLong(ceil(scale*
    image->tile_offset.y-0.5));
  clone_image->cache=ClonePixelCache(image->cache);
  /*
    On extent failure the clone is destroyed and NULL is returned.
  */
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    A NULL argument yields a freshly defaulted ImageInfo.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /*
    Scalar members.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    Heap-allocated string members are deep-copied (only when present).
  */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference (the plain assignment below is
    overwritten by the referencing call when a cache is present).
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  /*
    debug reflects the current logging configuration, not the source's flag.
  */
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Validate arguments: the destination rectangle must lie fully inside the
    destination image.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels, one destination row per (possibly parallel) iteration.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      ssize_t
        i;

      /*
        Copy only channels that are updatable in the destination and also
        exist in the source; others keep whatever QueueCacheView left there.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress is reported against image->rows although the
          loop runs geometry->height iterations, and the read of progress in
          SetImageProgress() is outside the atomic — confirm both are
          intentional.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image: decrement the reference count under the image
    semaphore and only tear the structure down when it reaches zero.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image: free owned resources in dependency order, invalidate the
    signature, and release the semaphore and the structure last.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Release the heap-allocated string members.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  /*
    Release the pixel cache reference, profile, and option list.
  */
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /*
    Invalidate the signature and free the structure itself; the returned
    value is always NULL for convenient pointer reassignment.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1 a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Delegate to the blob layer, which gives the image a private blob when
    the current one is shared (see the method description above).
  */
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *option;

  ExceptionInfo
    *sans_exception;

  /*
    Reset the structure to its documented default state.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment override when it is defined.
  */
  option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (option != (char *) NULL)
    {
      image_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  /*
    Seed the default colors; lookup warnings are deliberately discarded.
  */
  sans_exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,sans_exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,sans_exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,
    &image_info->matte_color,sans_exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Plain accessor: returns the stdio stream member without validation. */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask: return NULL when the requested mask channel is not
    enabled on the image.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  switch (type)
  {
    case ReadPixelMask:
    {
      if ((image->channels & ReadMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    case WritePixelMask:
    {
      if ((image->channels & WriteMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
    default:
    {
      /* Any other type is treated as the composite mask. */
      if ((image->channels & CompositeMaskChannel) == 0)
        return((Image *) NULL);
      break;
    }
  }
  /*
    Materialize the mask as a grayscale image of the same dimensions.
  */
  mask_image=AcquireImage((ImageInfo *) NULL,exception);
  status=SetImageExtent(mask_image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(mask_image));
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Copy the selected mask channel of each pixel into the gray channel of
      the mask image.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the partially built mask is destroyed and NULL
    (the destroyed pointer) is returned.
  */
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Snapshot the count under the image semaphore for a consistent read.
  */
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Delegate to the pixel cache, which owns the virtual pixel setting.
  */
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  const char
    *p;

  int
    c;

  MagickBooleanType
    canonical;

  ssize_t
    field_width,
    offset;

  /*
    Start from a verbatim copy of the format; substitutions below edit the
    copy in place.  offset tracks how far the output has grown or shrunk
    relative to the format string so later substitutions land correctly.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  if (IsStringTrue(GetImageOption(image_info,"filename:literal")) != MagickFalse)
    return(strlen(filename));
  /*
    Scan the format for % specifiers; %% is an escaped literal percent.
  */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution (%d, %o, %x, with optional 0-padded
          field width).  The specifier is temporarily NUL-terminated so it
          can be handed to FormatLocaleString as a printf format.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MagickPathExtent-(p-format-offset)),p,value);
        /* NOTE(review): the 4 here presumes a 4-digit default rendering of
           the scene number — confirm against FormatLocaleString output. */
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        char
          *r;

        ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option: extract the bracketed pattern (brackets may nest),
          then only "filename:" properties are substituted.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /*
          Resolve the value: image property first, then image artifact,
          then image-info option.
        */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        /*
          Splice the value over the %[...] specifier; the +3 accounts for
          the three delimiter characters '%', '[', and ']'.
        */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),option,(size_t)
          (MagickPathExtent-(p-format-offset)));
        offset+=strlen(pattern)-strlen(option)+3;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    No substitution performed: restore the untouched format.  Otherwise
    collapse any remaining %% escapes to a single %.
  */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  else
    for (q=filename; *q != '\0'; q++)
      if ((*q == '%') && (*(q+1) == '%'))
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support pixel values are always integral and in range.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every channel value seen is integral and
    within [0, QuantumRange]; the sense is inverted on return.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /*
          A pixel is "HDR" when it is out of the quantum range or carries a
          fractional part.
        */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    /*
      NOTE(review): the x loop above has no break, so x always equals
      image->columns here and this check never fires; status is already
      updated per pixel — confirm this is intentional leftover.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Invert: status==MagickTrue means "all in range", i.e. not HDR.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *next;

  /*
    Verify the signature of every frame in the image sequence.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent];

  const Image
    *next;

  /*
    The sequence is tainted when any frame is flagged as altered, or when a
    frame's format or filename differs from the first frame's.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->taint != MagickFalse) ||
        (LocaleCompare(next->magick,magick) != 0) ||
        (LocaleCompare(next->filename,filename) != 0))
      return(MagickTrue);
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure *image is uniquely owned: when the reference count exceeds one,
    replace *image with a detached clone and drop one reference from the
    original.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        Clone failed (exception holds the reason): leave *image and its
        reference count untouched instead of storing NULL and reporting
        success.
      */
      return(MagickFalse);
    }
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  /*
    Acquire the canvas and inherit geometry and pixel traits from the
    requested background color.
  */
  image=AcquireImage(image_info,exception);
  image->columns=width;
  image->rows=height;
  /* NOTE(review): colorspace is assigned directly rather than through
     SetImageColorspace() — confirm the pixel cache needs no update here. */
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  /*
    Fill every pixel with the background color, one row per (possibly
    parallel) iteration.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the partial canvas is destroyed and NULL is returned.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Increment the reference count under the image semaphore so concurrent
    ReferenceImage/DestroyImage calls see a consistent count.
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    geometry_flags;

  RectangleInfo
    region;

  /*
    Reset the page canvas and offsets from the relative page specification.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  geometry_flags=ParseAbsoluteGeometry(page,&region);
  if ((geometry_flags & WidthValue) != 0)
    {
      /* a width with no height implies a square canvas */
      if ((geometry_flags & HeightValue) == 0)
        region.height=region.width;
      image->page.width=region.width;
      image->page.height=region.height;
    }
  if ((geometry_flags & AspectValue) == 0)
    {
      /*
        Absolute offsets: a positive offset on a zero-sized canvas grows the
        canvas to contain the image.
      */
      if ((geometry_flags & XValue) != 0)
        {
          image->page.x=region.x;
          if ((image->page.width == 0) && (region.x > 0))
            image->page.width=image->columns+region.x;
        }
      if ((geometry_flags & YValue) != 0)
        {
          image->page.y=region.y;
          if ((image->page.height == 0) && (region.y > 0))
            image->page.height=image->rows+region.y;
        }
    }
  else
    {
      /* relative (+/-) offsets adjust the current position */
      if ((geometry_flags & XValue) != 0)
        image->page.x+=region.x;
      if ((geometry_flags & YValue) != 0)
        image->page.y+=region.y;
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel components
% are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  size_t
    length;
  ssize_t
    y;
  void
    *pixels;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Fast path: when the pixel cache is resident in memory, zero the whole
    buffer with a single memset.
  */
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.
      */
      (void) memset(pixels,0,length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* zero every channel of every pixel in this row */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: the level of transparency: 0 is fully transparent and QuantumRange
% is fully opaque.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* enable alpha blending on the image before writing the channel */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* honor the write mask: only touch pixels whose mask is > 50% */
      if (GetPixelWriteMask(image,q) > (QuantumRange/2))
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  PixelInfo
    background;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    If the background color carries alpha but the image has none yet,
    activate the image's alpha channel first.
  */
  if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  /* convert the background color to the image's colorspace/traits */
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* a previous row already failed; skip remaining work */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* Thin wrapper: delegate to the pixel-channel mask implementation and
     return its result (presumably the prior mask -- see
     SetPixelChannelMask). */
  return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o color: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /*
    Adopt the colorspace, alpha trait, fuzz and depth of the fill color.
  */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  /* resynchronize the pixel cache with the new storage class */
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /* consistency: validate arguments like the sibling Set* methods do */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  /*
    Clamp the depth to a supported range, reporting an exception when the
    requested depth cannot be honored.
  */
  if (image->depth == 0)
    {
      image->depth=8;
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  if (image->depth > (8*sizeof(MagickSizeType)))
    {
      image->depth=8*sizeof(MagickSizeType);
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageDepthNotSupported","`%s'",image->filename);
    }
  /* resynchronize the pixel cache with the new geometry */
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const MagickInfo *SetImageInfoFromExtension(ImageInfo *image_info,
  const char *component,char *magic,ExceptionInfo *exception)
{
  const MagickInfo
    *magick_info;

  MagickFormatType
    format_type;

  ssize_t
    i;

  static const char
    *format_type_formats[] =
    {
      "AUTOTRACE",
      "BROWSE",
      "DCRAW",
      "EDIT",
      "LAUNCH",
      "MPEG:DECODE",
      "MPEG:ENCODE",
      "PRINT",
      "PS:ALPHA",
      "PS:CMYK",
      "PS:COLOR",
      "PS:GRAY",
      "PS:MONO",
      "SCAN",
      "SHOW",
      "WIN",
      (char *) NULL
    };

  /*
    Derive the image format from the (uppercased) filename extension.
  */
  (void) CopyMagickString(magic,component,MagickPathExtent);
  LocaleUpper(magic);
  /*
    Prefer the format type registered for this magick; otherwise scan the
    explicit-format table.
  */
  magick_info=GetMagickInfo(magic,exception);
  format_type=UndefinedFormatType;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (magick_info->format_type != UndefinedFormatType))
    format_type=magick_info->format_type;
  for (i=0; format_type_formats[i] != (char *) NULL; i++)
  {
    if (format_type != UndefinedFormatType)
      break;
    /* cheap first-character check before the full compare */
    if ((*magic == *format_type_formats[i]) &&
        (LocaleCompare(magic,format_type_formats[i]) == 0))
      format_type=ExplicitFormatType;
  }
  switch (format_type)
  {
    case ExplicitFormatType:
    {
      image_info->affirm=MagickTrue;
      (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      break;
    }
    case UndefinedFormatType:
    {
      (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      break;
    }
    default:
      break;  /* other registered format types leave the magick untouched */
  }
  if (LocaleCompare(magic,"RGB") == 0)
    image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
  return(magick_info);
}
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    path[MagickPathExtent],
    *q;
  const MagicInfo
    *magic_info;
  const MagickInfo
    *magick_info;
  ExceptionInfo
    *sans_exception;
  Image
    *image;
  MagickBooleanType
    status;
  const char
    *p;
  ssize_t
    count;
  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* not a scene list; a plain geometry selects an extract region */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;
          /*
            Parse the comma/dash separated scene list (e.g. 1,3-5) into the
            [scene, scene+number_scenes) range.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            /* skip whitespace and commas between range terms */
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
  if (*component != '\0')
    {
      /*
        Base path sans any compression extension.
      */
      GetPathComponent(image_info->filename,BasePathSansCompressExtension,path);
      GetPathComponent(path,ExtensionPath,component);
    }
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  /* first guess: derive the format from the filename extension */
  if ((*component != '\0') && (IsGlob(component) == MagickFalse))
    magick_info=SetImageInfoFromExtension(image_info,component,magic,
      sans_exception);
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      /* no explicit format prefix; fall back on the current magick */
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      if (frames == 0)
        GetPathComponent(image_info->filename,CanonicalPath,component);
      else
        GetPathComponent(image_info->filename,SubcanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;
      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=(const DelegateInfo *) NULL;
      if (magick_info == (const MagickInfo *) NULL)
        {
          /* unknown coder; perhaps a delegate handles this format */
          delegate_info=GetDelegateInfo(magic,"*",sans_exception);
          if (delegate_info == (const DelegateInfo *) NULL)
            delegate_info=GetDelegateInfo("*",magic,sans_exception);
          if ((delegate_info == (const DelegateInfo *) NULL) &&
              ((*component != '\0') && (IsGlob(component) == MagickFalse)))
            {
              /*
                Retry in case GetMagickInfo loaded a custom module.
              */
              magick_info=SetImageInfoFromExtension(image_info,component,magic,
                sans_exception);
            }
        }
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          /* honor the explicit prefix and strip it from the filename */
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;
      size_t
        magick_size;
      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to seekable temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              (void) RelinquishUniqueFileResource(component);
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      /* read the magic bytes and rewind so the coder sees the full stream */
      magick=(unsigned char *) AcquireQuantumMemory(1,magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) memset(magick,0,magick_size);
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic cache.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->magick_module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* NOTE(review): the blob is borrowed, not copied -- the caller retains
     ownership and must keep it alive while image_info references it */
  image_info->blob=(void *) blob;
  image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* stored by reference; the caller owns the custom stream's lifetime */
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* stored by reference; may be NULL to detach a previously set file */
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      /*
        A null mask detaches only the matching mask channel.
      */
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;  /* bug fix: missing break fell through and also cleared the
                     composite mask channel */
        }
        default:
        {
          image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Enable the requested mask channel before populating it.
  */
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy the mask's intensity into the selected mask channel, row by row.
    mask_trait is toggled so mask processing itself is not masked.
  */
  status=MagickTrue;
  image->mask_trait=UpdatePixelTrait;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(mask,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;
      /* pixels outside the mask's extent are fully transparent (0) */
      intensity=0.0;
      if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
        intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,ClampToQuantum(intensity),q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o geometry: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    A NULL region removes the mask: clear the matching mask-channel flag and
    resynchronize the pixel cache so the channel map is rebuilt.
  */
  if (region == (const RectangleInfo *) NULL)
    {
      switch (type)
      {
        case ReadPixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
          break;
        }
        case WritePixelMask:
        {
          image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
          break;
        }
        default:
        {
          image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
          break;
        }
      }
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Enable the requested mask channel before writing the mask values.
  */
  switch (type)
  {
    case ReadPixelMask:
    {
      image->channels=(ChannelType) (image->channels | ReadMaskChannel);
      break;
    }
    case WritePixelMask:
    {
      image->channels=(ChannelType) (image->channels | WriteMaskChannel);
      break;
    }
    default:
    {
      image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
      break;
    }
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /*
    UpdatePixelTrait lets the loop below write mask values; it is reset to
    UndefinedPixelTrait once the mask has been stored.
  */
  image->mask_trait=UpdatePixelTrait;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /*
        Pixels inside the region are masked (0); all others are opaque
        QuantumRange.
      */
      pixel=QuantumRange;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=(Quantum) 0;
      switch (type)
      {
        case ReadPixelMask:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelCompositeMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image->mask_trait=UndefinedPixelTrait;
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  VirtualPixelMethod
    prior_method;

  /*
    The virtual-pixel setting lives in the pixel cache; delegate to it and
    hand back the method that was in effect before this call.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  prior_method=SetPixelCacheVirtualMethod(image,virtual_pixel_method,
    exception);
  return(prior_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
%  final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SmushXGap() measures how far the current (right) image may be slid left
  into its predecessor: for each row it counts the run of transparent pixels
  on the trailing edge of the left image plus the leading edge of the right
  image, and keeps the minimum over all rows.  The returned value is that
  minimum gap reduced by the requested offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* first image in the list: nothing to smush against */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  /* the gap can never exceed the width of the right image */
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* scan the left image right-to-left for its transparent trailing edge */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* scan the right image left-to-right for its transparent leading edge */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* keep the smallest combined transparent run over all rows */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the row loop has no break, so y == rows here and this
     early-return looks unreachable -- kept as-is, confirm intent */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
/*
  SmushYGap() is the vertical counterpart of SmushXGap(): for each column it
  counts the run of transparent pixels on the bottom edge of the previous
  (top) image plus the top edge of the current (bottom) image, keeps the
  minimum over all columns, and returns that gap reduced by the requested
  offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* first image in the list: nothing to smush against */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  /* the gap can never exceed the height of the bottom image */
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* scan the top image bottom-to-top for its transparent bottom edge */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* scan the bottom image top-to-bottom for its transparent top edge */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    /* keep the smallest combined transparent run over all columns */
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the column loop has no break, so x == columns here and
     this early-return looks unreachable -- kept as-is, confirm intent */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  /*
    First pass: size the canvas.  Stacked (vertical) smush accumulates
    heights and takes the maximum width; horizontal smush does the converse.
    The inter-image offset is added once per join.
  */
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  /*
    Second pass: composite each image at a position pulled back by the
    measured transparent gap (SmushX/YGap) so neighbors overlap their
    transparent edges.
  */
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    /* advance the running offset along the smush axis */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /*
    Trim the canvas to the actual extent used after gap removal.
  */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  static const char
    *property_names[] =
    {
      "comment",
      "date:create",
      "date:modify"
    };

  MagickBooleanType
    status;

  size_t
    i;

  magick_unreferenced(exception);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Discard every embedded profile, then the identifying text properties.
  */
  DestroyImageProfiles(image);
  for (i=0; i < (sizeof(property_names)/sizeof(property_names[0])); i++)
    (void) DeleteImageProperty(image,property_names[i]);
  /*
    Instruct the PNG encoder to omit ancillary metadata chunks on write.
  */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Validate a colormap index: an index past the end of the colormap raises the
  caller's range-exception flag and is replaced by colormap entry 0; a valid
  index is returned unchanged.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((Quantum) 0);
    }
  return(index);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* ping images carry no pixels to synchronize */
  if (image->ping != MagickFalse)
    return(MagickTrue);
  /* only colormapped (PseudoClass) images have indexes to resolve */
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelInfo *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* remember the taint flag: rewriting pixels below must not mark the
     image as modified */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* clamp out-of-range indexes to 0 and note the range violation */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
% that operations and coders can find such settings. In IMv7 if a desired
% per-image artifact is not set, then it will directly look for a global
% option as a fallback, as such this copy is no longer needed, only the
% link set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *next;

  /*
    Propagate the global image_info settings onto every image in the list,
    then drop the one-shot "page" option so it does not carry over to later
    operations.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
    (void) SyncImageSettings(image_info,next,exception);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Each block below copies one global option, when present, into the
    corresponding per-image attribute.
  */
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* y defaults to x (rho) when only one chromaticity value is given */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.blue_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      /* a single density value applies to both axes */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->resolution.x=geometry_info.rho;
      image->resolution.y=image->resolution.x;
      if ((flags & SigmaValue) != 0)
        image->resolution.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.green_primary.y=geometry_info.sigma;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
      exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.red_primary.y=geometry_info.sigma;
    }
  /* an explicit image_info quality overrides the "quality" option above */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  /*
    Units: the "units" option overrides image_info->units; when the unit
    system changes, the stored resolution is converted between pixels/inch
    and pixels/centimeter, then any "density" option is re-applied in the
    new units.
  */
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* round to two decimal places */
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
      option=GetImageOption(image_info,"density");
      if (option != (const char *) NULL)
        {
          flags=ParseGeometry(option,&geometry_info);
          if ((flags & RhoValue) != 0)
            image->resolution.x=geometry_info.rho;
          image->resolution.y=image->resolution.x;
          if ((flags & SigmaValue) != 0)
            image->resolution.y=geometry_info.sigma;
        }
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & RhoValue) != 0)
        image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=image->chromaticity.white_point.x;
      if ((flags & SigmaValue) != 0)
        image->chromaticity.white_point.y=geometry_info.sigma;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a global
    option setting/define.  This saves a lot of duplication of global options
    into per-image artifacts, while ensuring only specifically set per-image
    artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
|
GB_unaryop__abs_bool_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_int64
// op(A') function: GB_tran__abs_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all p in [0,anz).  Per the macros above, the
// cast is GB_CASTING (bool z = (bool) x) and the operator GB_OP is the
// identity (z = x), so ABS on a bool output reduces to the int64->bool cast.
// The loop is embarrassingly parallel and split statically over nthreads.
GrB_Info GB_unop__abs_bool_int64
(
    bool *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out (see GB_DISABLE above); caller must fall back
    // to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = op (cast (Ax [p]))
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int64_t entries to bool, and
// apply the (identity) operator.  The actual work is generated by textually
// including the GB_unaryop_transpose.c template, which is specialized by the
// GB_* macros defined earlier in this file.
GrB_Info GB_tran__abs_bool_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator compiled out; caller must use the generic transpose
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mixed_tentusscher_myo_epi_2004_S2_14.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_14.h"
// Reports static model metadata to the solver: the initial transmembrane
// voltage (INITIAL_V) and the number of ODE state variables (NEQ).  The
// get_initial_v / get_neq flags select which fields the caller wants filled.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
// Loads the initial state vector sv (NEQ entries) for the cell sv_id.  The
// mask in extra_data selects the variant: mapping[sv_id] == 0 -> myocardium
// steady-state values, otherwise -> epicardium steady-state values.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    // NOTE(review): first_call is unsynchronized; assumes initial conditions
    // are set from a single thread -- confirm against the caller.
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        // A mixed model cannot run without a per-cell type mask.
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
      /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
      */

        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
      /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
      */

        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}
// Advances every requested cell by num_steps time steps, dispatching each
// cell to the myocardium or epicardium solver according to the mask in
// extra_data.  Cells are independent, so the outer loop is parallelized.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        // A mixed model cannot run without a per-cell type mask.
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    // sv_id is written by every iteration, so it must stay thread-private.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        // cells_to_solve, when present, remaps the loop position to the
        // global state-vector id of the cell.
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): the mask is indexed with the loop position i
            // here, but with sv_id in set_model_initial_conditions_cpu;
            // these differ whenever cells_to_solve remaps ids -- confirm
            // which indexing is intended.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Advance one myocardium cell by a single time step: snapshot the current
// state, evaluate the model, and copy the result back into sv.  Note the
// direct assignment below -- RHS_cpu_myo is expected to place the updated
// state (not raw time derivatives) into its output array.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real current_state[NEQ];
    real next_state[NEQ];
    int eq;

    for (eq = 0; eq < NEQ; eq++)
        current_state[eq] = sv[eq];

    RHS_cpu_myo(current_state, next_state, stim_current, dt);

    for (eq = 0; eq < NEQ; eq++)
        sv[eq] = next_state[eq];
}
// Evaluates one time step of the myocardium ("myo") cell variant.
// NOTE: despite the name, rDY_ receives the UPDATED STATE after dt, not time
// derivatives: gating variables use an exponential update toward their steady
// state (Rush-Larsen style, see "Update gates" below) and the membrane voltage
// uses an explicit Euler step (rDY_[0]).
// Presumably this is a ten Tusscher-type human ventricular myocyte model
// (current names and the "Updated from CellML" note suggest so) -- TODO confirm
// against the original reference.
//   sv           - input state vector, 17 entries (see unpacking below)
//   rDY_         - output: updated state after dt (same 17-entry layout)
//   stim_current - external stimulus current added to the total ionic current
//   dt           - integration time step
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Working variables: ionic currents, concentration increments, reversal
// potentials, rate constants and steady states for each gate.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
// Precomputed constants reused several times below.
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Exponential decay factors for the fCa and g gates (fixed time constants).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Nernst reversal potentials from the intra/extracellular concentrations.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
// Sum of all membrane currents plus the external stimulus; drives the
// voltage update at the bottom of this function.
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Free SR calcium: analytic (quadratic-formula) solution of the rapid
// buffering equilibrium built from bjsr/cjsr.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// Same quadratic treatment for cytosolic calcium buffering.
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Exponential (Rush-Larsen style) integration: each gate relaxes toward its
// steady state X_INF with time constant TAU_X over the step dt.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates are only allowed to decrease when depolarized (> -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// Explicit Euler step on the membrane potential.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Advances the epicardium cell state in `sv` by one time step of size `dt`.
// The current state is copied into a scratch buffer, RHS_cpu_epi computes the
// updated state into `next`, and the result is written back into `sv`.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state[NEQ];
    real next[NEQ];

    for(int eq = 0; eq < NEQ; eq++)
        state[eq] = sv[eq];

    RHS_cpu_epi(state, next, stim_current, dt);

    for(int eq = 0; eq < NEQ; eq++)
        sv[eq] = next[eq];
}
// Evaluates one time step of the epicardium ("epi") cell variant.
// Structure is identical to RHS_cpu_myo; differences are the epicardial
// Gks/Gto defaults and, importantly, a hard-coded tuned parameter set (see
// `parameters[]` below) that OVERRIDES most conductances and adds arel/crel/
// Vleak for the SR release/leak formulation.
// NOTE: rDY_ receives the UPDATED STATE after dt, not derivatives -- gates use
// an exponential (Rush-Larsen style) update and the voltage an explicit Euler
// step (rDY_[0]).
//   sv           - input state vector, 17 entries (see unpacking below)
//   rDY_         - output: updated state after dt (same 17-entry layout)
//   stim_current - external stimulus current added to the total ionic current
//   dt           - integration time step
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// NOTE(review): the default conductances above are immediately overwritten by
// this hard-coded, presumably fitted/tuned parameter set -- confirm its origin.
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
// SR release amplitude/offset and leak rate (used in A and Ileak below).
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Working variables: ionic currents, concentration increments, reversal
// potentials, rate constants and steady states for each gate.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
// Precomputed constants reused several times below.
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Exponential decay factors for the fCa and g gates (fixed time constants).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Nernst reversal potentials from the intra/extracellular concentrations.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
// Sum of all membrane currents plus the external stimulus; drives the
// voltage update at the bottom of this function.
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
// SR release uses the tuned arel/crel instead of the fixed myo constants.
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Free SR calcium: analytic (quadratic-formula) solution of the rapid
// buffering equilibrium built from bjsr/cjsr.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// Same quadratic treatment for cytosolic calcium buffering.
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Exponential (Rush-Larsen style) integration: each gate relaxes toward its
// steady state X_INF with time constant TAU_X over the step dt.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates are only allowed to decrease when depolarized (> -37 mV).
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// Explicit Euler step on the membrane potential.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
// C_rowmajor = A * B  is computed as  C^T_colmajor = B^T * A^T : swap the
// operands (and their storage orders / conjugation flags) and forward to the
// col-major specialization below, which holds the actual blocked kernel.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
typedef gebp_traits<RhsScalar,LhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
const LhsScalar* lhs, Index lhsStride,
const RhsScalar* rhs, Index rhsStride,
ResScalar* res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<RhsScalar,LhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
// transpose the product such that the result is column major
general_matrix_matrix_product<Index,
RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
ColMajor,ResInnerStride>
::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
}
};
/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
// Cache-blocked GEMM: res += alpha * lhs * rhs. The K/M/N block sizes come
// from `blocking`; panels of lhs/rhs are packed into contiguous buffers and
// multiplied with the gebp micro-kernel. When `info` is non-null (OpenMP
// build) the packing of the lhs is split across threads and synchronized via
// the spin-wait protocol on info[*].users / info[*].sync below.
template<
typename Index,
typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
const RhsScalar* _rhs, Index rhsStride,
ResScalar* _res, Index resIncr, Index resStride,
ResScalar alpha,
level3_blocking<LhsScalar,RhsScalar>& blocking,
GemmParallelInfo<Index>* info = 0)
{
typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
LhsMapper lhs(_lhs, lhsStride);
RhsMapper rhs(_rhs, rhsStride);
ResMapper res(_res, resStride, resIncr);
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
if(info)
{
// this is the parallel version!
int tid = omp_get_thread_num();
int threads = omp_get_num_threads();
// blockA is shared between threads (one slice per thread); blockB is
// a per-thread stack-allocated buffer.
LhsScalar* blockA = blocking.blockA();
eigen_internal_assert(blockA!=0);
std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
// let's start by packing B'.
pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);
// Pack A_k to A' in a parallel fashion:
// each thread packs the sub block A_k,i to A'_i where i is the thread id.
// However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
// NOTE: busy-wait (spin) synchronization, not a lock.
while(info[tid].users!=0) {}
info[tid].users += threads;
pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);
// Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
// Computes C_i += A' * B' per A'_i
// Each thread starts with its own slice (shift==0) so it never waits
// on itself, then rotates through the other threads' slices.
for(int shift=0; shift<threads; ++shift)
{
int i = (tid+shift)%threads;
// At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if (shift>0) {
while(info[i].sync!=k) {
}
}
gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
// Then keep going as usual with the remaining B'
for(Index j=nc; j<cols; j+=nc)
{
const Index actual_nc = (std::min)(j+nc,cols)-j;
// pack B_k,j to B'
pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);
// C_j += A' * B'
gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
// Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
for(Index i=0; i<threads; ++i)
#pragma omp atomic
info[i].users -= 1;
}
}
else
#endif // EIGEN_HAS_OPENMP
{
EIGEN_UNUSED_VARIABLE(info);
// this is the sequential version!
std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*nc;
// Use the blocking object's buffers if already allocated, otherwise
// fall back to aligned stack allocation.
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// If the whole rhs fits in one (kc x nc) block but lhs needs several
// row panels, pack the rhs only once (on the first i2 iteration).
const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
for(Index i2=0; i2<rows; i2+=mc)
{
const Index actual_mc = (std::min)(i2+mc,rows)-i2;
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
// => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
// Note that this panel will be read as many times as the number of blocks in the rhs's
// horizontal panel which is, in practice, a very low number.
pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);
// For each kc x nc block of the rhs's horizontal panel...
for(Index j2=0; j2<cols; j2+=nc)
{
const Index actual_nc = (std::min)(j2+nc,cols)-j2;
// We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
// micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
if((!pack_rhs_once) || i2==0)
pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);
// Everything is packed, we can now call the panel * block kernel:
gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
}
}
}
}
}
};
/*********************************************************************************
* Specialization of generic_product_impl for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// Callable work item that binds a GEMM kernel (Gemm::run) to its operands,
// destination, scale factor and blocking policy. operator() evaluates the
// product on a row/column sub-range, which is how parallelize_gemm splits
// the work across threads; with the defaults it evaluates the full product.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
// Prepare the blocking object for a multi-threaded run and allocate the
// shared packed-lhs buffer.
void initParallelSession(Index num_threads) const
{
m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
m_blocking.allocateA();
}
// Evaluate dst(row:row+rows, col:col+cols) += alpha * lhs * rhs.
// cols==-1 is a sentinel meaning "all columns of the rhs".
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
{
if(cols==-1)
cols = m_rhs.cols();
Gemm::run(rows, cols, m_lhs.cols(),
&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
const Rhs& m_rhs;
Dest& m_dest;
Scalar m_actualAlpha;
BlockingType& m_blocking;
};
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class for GEMM blocking policies: stores the cache block sizes along
// the M/N/K directions and the (possibly null) pointers to the buffers that
// receive the packed lhs (blockA) and rhs (blockB) panels. Derived classes
// (gemm_blocking_space specializations below) decide how the buffers are
// sized and allocated.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
typedef _LhsScalar LhsScalar;
typedef _RhsScalar RhsScalar;
protected:
LhsScalar* m_blockA;
RhsScalar* m_blockB;
Index m_mc;
Index m_nc;
Index m_kc;
public:
level3_blocking()
: m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
{}
inline Index mc() const { return m_mc; }
inline Index nc() const { return m_nc; }
inline Index kc() const { return m_kc; }
inline LhsScalar* blockA() { return m_blockA; }
inline RhsScalar* blockB() { return m_blockB; }
};
// Blocking policy for products whose dimensions are all fixed at compile
// time: block sizes equal the full (possibly transposed) dimensions and the
// packing buffers are statically sized members -- no heap allocation, so the
// allocate*() members are no-ops. A row-major destination swaps the roles of
// lhs and rhs (Transpose), matching the RowMajor product specialization.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor,
ActualRows = Transpose ? MaxCols : MaxRows,
ActualCols = Transpose ? MaxRows : MaxCols
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
enum {
SizeA = ActualRows * MaxDepth,
SizeB = ActualCols * MaxDepth
};
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
// Alignment cannot be guaranteed statically: over-allocate raw storage and
// align the pointers by hand in the constructor below.
EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif
public:
gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
{
this->m_mc = ActualRows;
this->m_nc = ActualCols;
this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
this->m_blockA = m_staticA;
this->m_blockB = m_staticB;
#else
// Round the raw storage addresses up to the default alignment boundary.
this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
}
void initParallel(Index, Index, Index, Index)
{}
inline void allocateA() {}
inline void allocateB() {}
inline void allocateAll() {}
};
// Blocking policy for runtime-sized products: block sizes are computed by
// computeProductBlockingSizes from the actual dimensions, and the packing
// buffers are heap-allocated lazily by allocateA/allocateB (freed in the
// destructor). A row-major destination swaps lhs/rhs roles (Transpose).
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
enum {
Transpose = StorageOrder==RowMajor
};
typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
typedef gebp_traits<LhsScalar,RhsScalar> Traits;
Index m_sizeA;
Index m_sizeB;
public:
gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
if(l3_blocking)
{
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
}
else // no l3 blocking
{
// Keep m_nc untouched: only kc and mc are shrunk to fit the caches.
Index n = this->m_nc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
}
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Recompute block sizes for a parallel run; must be called before the
// buffers are allocated (see the assert).
void initParallel(Index rows, Index cols, Index depth, Index num_threads)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
// Keep m_mc untouched here (only kc and nc may be shrunk).
Index m = this->m_mc;
computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
// Lazy, idempotent allocation of the packed-panel buffers.
void allocateA()
{
if(this->m_blockA==0)
this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
}
void allocateB()
{
if(this->m_blockB==0)
this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
}
void allocateAll()
{
allocateA();
allocateB();
}
~gemm_blocking_space()
{
aligned_delete(this->m_blockA, m_sizeA);
aligned_delete(this->m_blockB, m_sizeB);
}
};
} // end namespace internal
namespace internal {
// High-level dense*dense product evaluator for the GEMM path. For small
// products (heuristic: rhs.rows()+dst.rows()+dst.cols() < 20) it falls back
// to the coefficient-based lazy product; otherwise it dispatches to the
// blocked general_matrix_matrix_product kernel via scaleAndAddTo.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
typedef typename Product<Lhs,Rhs>::Scalar Scalar;
typedef typename Lhs::Scalar LhsScalar;
typedef typename Rhs::Scalar RhsScalar;
typedef internal::blas_traits<Lhs> LhsBlasTraits;
typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;
typedef internal::blas_traits<Rhs> RhsBlasTraits;
typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;
enum {
MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
};
typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;
// dst = lhs * rhs
template<typename Dst>
static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
// rhs.rows()>0 guards the lazy path against an empty inner dimension.
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
else
{
dst.setZero();
scaleAndAddTo(dst, lhs, rhs, Scalar(1));
}
}
// dst += lhs * rhs
template<typename Dst>
static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
else
scaleAndAddTo(dst,lhs, rhs, Scalar(1));
}
// dst -= lhs * rhs
template<typename Dst>
static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
{
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>())
else
scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
}
// dst += alpha * lhs * rhs via the blocked GEMM kernel, possibly in
// parallel (parallelize_gemm).
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
{
eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
// Empty product: nothing to accumulate.
if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
return;
// Strip scalar multiples/transposes off the operands; the extracted
// scalar factors are folded into alpha.
typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);
Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                           * RhsBlasTraits::extractScalarFactor(a_rhs);
typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
        Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
typedef internal::gemm_functor<
  Scalar, Index,
  internal::general_matrix_matrix_product<
    Index,
    LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
    RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
    (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,
    Dest::InnerStrideAtCompileTime>,
  ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;
BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
    (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
atomic_read_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
// Clang codegen test for '#pragma omp atomic read': one global pair (value,
// destination) per scalar/complex/vector/bit-field type exercised below.
// The CHECK lines pin the exact IR each atomic read must lower to; do not
// reorder or edit the statements without updating the CHECK patterns.
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
// Bit-field structs probe the widths/offsets the atomic lowering must use;
// the packed variants force unaligned accesses that go through
// __atomic_load instead of a native atomic instruction.
struct BitFields {
int : 32;
int a : 31;
} bfx;
struct BitFields_packed {
int : 32;
int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
int : 31;
int a : 1;
} bfx2;
struct BitFields2_packed {
int : 31;
int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
int : 11;
int a : 14;
} bfx3;
struct BitFields3_packed {
int : 11;
int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
short : 16;
int a: 1;
long b : 7;
} bfx4;
struct BitFields4_packed {
short : 16;
int a: 1;
long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
register int rix __asm__("0");
// Each '#pragma omp atomic read' below must produce an atomic load of the
// source followed by a plain store to the destination (plus conversions).
int main() {
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
bv = bx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
cv = cx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
ucv = ucx;
// CHECK: load atomic i16, i16*
// CHECK: store i16
#pragma omp atomic read
sv = sx;
// CHECK: load atomic i16, i16*
// CHECK: store i16
#pragma omp atomic read
usv = usx;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
iv = ix;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
uiv = uix;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
lv = lx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
ulv = ulx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
llv = llx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
ullv = ullx;
// CHECK: load atomic i32, i32* bitcast (float*
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
fv = fx;
// CHECK: load atomic i64, i64* bitcast (double*
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
dv = dx;
// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: store i128 [[LD]], i128* [[BITCAST]]
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
ldv = ldx;
// Complex types are too wide for a native atomic load: libcall expected.
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = cix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = cfx;
// CHECK: call{{.*}} void @__atomic_load(i64 16,
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
// CHECK: store double
#pragma omp atomic seq_cst read
cdv = cdx;
// Mixed-type reads: atomic load at the source type, conversion, then a
// plain store at the destination type.
// CHECK: load atomic i64, i64*
// CHECK: store i8
#pragma omp atomic read
bv = ulx;
// CHECK: load atomic i8, i8*
// CHECK: store i8
#pragma omp atomic read
cv = bx;
// CHECK: load atomic i8, i8*
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read, seq_cst
ucv = cx;
// CHECK: load atomic i64, i64*
// CHECK: store i16
#pragma omp atomic read
sv = ulx;
// CHECK: load atomic i64, i64*
// CHECK: store i16
#pragma omp atomic read
usv = lx;
// CHECK: load atomic i32, i32*
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
iv = uix;
// CHECK: load atomic i32, i32*
// CHECK: store i32
#pragma omp atomic read
uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i64
#pragma omp atomic read
lv = cix;
// CHECK: load atomic i32, i32*
// CHECK: store i64
#pragma omp atomic read
ulv = fx;
// CHECK: load atomic i64, i64*
// CHECK: store i64
#pragma omp atomic read
llv = dx;
// CHECK: load atomic i128, i128*
// CHECK: store i64
#pragma omp atomic read
ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
#pragma omp atomic read
fv = cix;
// CHECK: load atomic i16, i16*
// CHECK: store double
#pragma omp atomic read
dv = sx;
// CHECK: load atomic i8, i8*
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bx;
// CHECK: load atomic i8, i8*
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = bx;
// CHECK: load atomic i16, i16*
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = usx;
// CHECK: load atomic i64, i64*
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
cdv = llx;
// Vector element read: the whole vector is loaded atomically, then the
// element is extracted from a temporary.
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic
// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
bv = int4x[0];
// Bit-field reads: atomic load of the containing storage unit, then
// shift/mask to isolate the field.
// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4.b;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4_packed.b;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[LD]], i64* [[BITCAST]]
// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read
ulv = float2x.x;
// Register variable: read goes through the read_register intrinsic.
// CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
#pragma omp atomic read seq_cst
dv = rix;
return 0;
}
#endif
|
correlation.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Array initialization: sets *float_n to the fixed normalization constant
   and fills data[i][j] with the deterministic pattern (i*j)/M. */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  *float_n = 1.2;
  for (int row = 0; row < m; row++)
    for (int col = 0; col < n; col++)
      data[row][col] = ((DATA_TYPE) row*col) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints every entry of the m-by-m matrix, with a newline every
   20 elements (counting in row-major order). */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  for (int r = 0; r < m; r++)
    for (int c = 0; c < m; c++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[r][c]);
      if ((r * m + c) % 20 == 0)
        fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
including the call and return.
Computes the m-by-m correlation matrix of the n-by-m data matrix:
column means, column standard deviations (clamped to 1.0 near zero),
centering/normalization of the data, then the upper triangle of the
correlation matrix mirrored into the lower triangle.
NOTE: #P1/#P2/#P3 are autotuner placeholders (schedule kind, chunk
size, thread count) substituted before compilation; this file is a
template, not directly compilable C. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(#P3)
{
#pragma omp for schedule(#P1, #P2)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
/* Implicit barrier after the previous worksharing loop guarantees all
means are complete before they are read here. */
#pragma omp for schedule(#P1, #P2)
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following in an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
#pragma omp for schedule(#P1, #P2)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
/* Calculate the m * m correlation matrix. */
/* Only the strict upper triangle is computed; each entry is mirrored
to the lower triangle immediately. */
#pragma omp for schedule(#P1, #P2)
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
/* Last diagonal element is not touched by the triangular loop above. */
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Driver: allocates the arrays, initializes the input, times the kernel,
and prints the live-out matrix to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
fill.h | #pragma once
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <omp.h>
#include "_cuda.h"
using std::vector;
using std::unordered_map;
using std::max;
// Sets every one of the N elements starting at x to the value v.
template <class T>
void fill(T *x, int N, T v) {
  T *stop = x + N;
  for (T *p = x; p != stop; ++p)
    *p = v;
}
// Overwrites every element of the vector with v (size is unchanged).
template <class T>
void fill(std::vector<T>& x, T v) {
  x.assign(x.size(), v);
}
// Assigns v to the mapped value of every existing key; no keys are added.
template <class K, class T>
void fill(std::unordered_map<K, T>& x, T v) {
  for (auto it = x.begin(); it != x.end(); ++it)
    it->second = v;
}
// Writes v into x at each index drawn from the range `is`.
template <class T, class C>
void fillAt(T *x, C&& is, T v) {
  for (auto it = std::begin(is); it != std::end(is); ++it) {
    int idx = *it;
    x[idx] = v;
  }
}
// Writes v into the vector at each index drawn from the range `is`.
// (Same element-wise effect as the pointer overload applied to x.data().)
template <class T, class C>
void fillAt(std::vector<T>& x, C&& is, T v) {
  for (int pos : is)
    x[pos] = v;
}
// Maps every key in `ks` to v; keys not yet present are inserted
// (operator[] default-constructs then assigns).
template <class K, class T, class C>
void fillAt(std::unordered_map<K, T>& x, C&& ks, T v) {
  for (auto it = std::begin(ks); it != std::end(ks); ++it)
    x[*it] = v;
}
// Sets every one of the N elements starting at x to v, splitting the
// iterations across OpenMP threads (serial when OpenMP is disabled).
template <class T>
void fillOmp(T *x, int N, T v) {
  #pragma omp parallel for
  for (int idx = 0; idx < N; idx++)
    x[idx] = v;
}
// Vector convenience wrapper: delegates to the pointer overload over the
// vector's contiguous storage.
template <class T>
void fillOmp(std::vector<T>& x, T v) {
  int count = static_cast<int>(x.size());
  fillOmp(x.data(), count, v);
}
// Device-side helper: starting at element i, writes v to every DI-th
// element of a until N is reached (each thread covers a strided subset).
template <class T>
__device__ void fillKernelLoop(T *a, int N, T v, int i, int DI) {
for (; i<N; i+=DI)
a[i] = v;
}
// CUDA kernel: each thread fills a strided slice of a with v.
// DEFINE(t, b, B, G) is a project macro — presumably it binds thread
// index t, block index b, block size B, and grid size G; confirm against
// _cuda.h. The start offset B*b+t and stride G*B then cover all of a.
template <class T>
__global__ void fillKernel(T *a, int N, T v) {
DEFINE(t, b, B, G);
fillKernelLoop(a, N, v, B*b+t, G*B);
}
// Fills the host array a (N elements) with v using the GPU: allocates a
// device buffer, copies a in, runs fillKernel, and copies the result back.
// NOTE(review): the initial host-to-device copy is unnecessary for a pure
// fill (the kernel overwrites every element) — confirm before removing.
// TRY is a project error-checking macro from _cuda.h.
template <class T>
void fillCuda(T *a, int N, T v) {
int threads = _THREADS;
// Enough blocks to cover N, capped at the project-wide _BLOCKS limit.
int blocks = min(ceilDiv(N, threads), _BLOCKS);
size_t A1 = N * sizeof(T);
T *aD;
TRY( cudaMalloc(&aD, A1) );
TRY( cudaMemcpy(aD, a, A1, cudaMemcpyHostToDevice) );
fillKernel<<<blocks, threads>>>(aD, N, v);
// cudaMemcpy after a kernel launch implicitly synchronizes the stream.
TRY( cudaMemcpy(a, aD, A1, cudaMemcpyDeviceToHost) );
TRY( cudaFree(aD) );
}
// Vector convenience wrapper: fills x in place via the pointer overload.
template <class T>
void fillCuda(vector<T>& x, T v) {
fillCuda(x.data(), x.size(), v);
}
|
target-24.c | #include <omp.h>
#include <stdlib.h>
/* libgomp conformance test for omp_target_is_present and the reference
counting of target enter/exit data. shared_mem records whether the
device shares memory with the host (the alloc-mapped flag written
inside target is visible after the region only then). With separate
memories each enter data map(to:) increments the mapping refcount and
each exit data map(release:) decrements it, so presence flips exactly
when the count returns to zero; map(delete:) drops the mapping
regardless of the count. Any deviation aborts. */
int
main ()
{
int d = omp_get_default_device ();
int id = omp_get_initial_device ();
/* Fall back to the host device if the default device is out of range. */
if (d < 0 || d >= omp_get_num_devices ())
d = id;
int a[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
int *b = a;
int shared_mem = 0;
/* On shared-memory devices this store is visible to the host even though
shared_mem is only alloc-mapped. */
#pragma omp target map (alloc: shared_mem)
shared_mem = 1;
if (omp_target_is_present (b, d) != shared_mem)
abort ();
/* refcount(a): 0 -> 1; a must now be present. */
#pragma omp target enter data map (to: a)
if (omp_target_is_present (b, d) == 0)
abort ();
/* Zero-length alloc of already-present data: 1 -> 2. */
#pragma omp target enter data map (alloc: b[:0])
if (omp_target_is_present (b, d) == 0)
abort ();
/* 2 -> 1: still present. */
#pragma omp target exit data map (release: b[:0])
if (omp_target_is_present (b, d) == 0)
abort ();
/* 1 -> 0: presence now matches shared_mem only. */
#pragma omp target exit data map (release: b[:0])
if (omp_target_is_present (b, d) != shared_mem)
abort ();
/* Map again: 0 -> 1. */
#pragma omp target enter data map (to: a)
if (omp_target_is_present (b, d) == 0)
abort ();
/* always,to on a zero-length section of present data: still present. */
#pragma omp target enter data map (always, to: b[:0])
if (omp_target_is_present (b, d) == 0)
abort ();
/* delete drops the mapping regardless of the reference count. */
#pragma omp target exit data map (delete: b[:0])
if (omp_target_is_present (b, d) != shared_mem)
abort ();
/* exit data on non-present data must be a no-op, not an error. */
#pragma omp target exit data map (from: b[:0])
return 0;
}
|
dgeqrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeqrf.c, normal z -> d, Fri Sep 28 17:38:01 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_geqrf
*
* Computes a tile QR factorization of a real or complex m-by-n matrix A.
* The factorization has the form
* \f[ A = Q \times R \f],
* where Q is a matrix with orthonormal columns and R is an upper triangular
* with positive diagonal.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix A.
* n >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit, the elements on and above the diagonal of the array contain
* the min(m,n)-by-n upper trapezoidal matrix R (R is upper triangular
* if m >= n); the elements below the diagonal represent the orthogonal
* matrix Q as a product of elementary reflectors stored by tiles.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data, required by plasma_dgeqrs to
* solve the system of equations.
* Matrix in T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_dgeqrf
* @sa plasma_cgeqrf
* @sa plasma_dgeqrf
* @sa plasma_sgeqrf
* @sa plasma_dgeqrs
* @sa plasma_dgels
*
******************************************************************************/
// Tile QR factorization of the m-by-n matrix pA (see the doxygen block
// above for the full contract). Validates arguments, converts pA to tile
// layout, runs the asynchronous tile QR, and converts the result back.
// Returns PlasmaSuccess or a negative/positive PLASMA error code.
//
// Fix vs. original: descriptor/workspace error paths no longer leak the
// already-created tile matrix A (and descriptor T).
int plasma_dgeqrf(int m, int n,
                  double *pA, int lda,
                  plasma_desc_t *T)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaRealDouble, m, n);
    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;
    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Release the tile matrix created above (was leaked before).
        plasma_desc_destroy(&A);
        return retval;
    }
    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // geqrt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Release both descriptors created above (were leaked before).
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        // Call the tile async function.
        plasma_omp_dgeqrf(A, *T, work, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization
    plasma_workspace_destroy(&work);
    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_geqrf
*
* Computes a tile QR factorization of a matrix.
* Non-blocking tile version of plasma_dgeqrf().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[out] T
* Descriptor of matrix T.
* On exit, auxiliary factorization data, required by plasma_dgeqrs to
* solve the system of equations.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For QR factorization, contains preallocated space for tau and work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dgeqrf
* @sa plasma_omp_cgeqrf
* @sa plasma_omp_dgeqrf
* @sa plasma_omp_sgeqrf
* @sa plasma_omp_dgeqrs
* @sa plasma_omp_dgeqrs
* @sa plasma_omp_dgels
*
******************************************************************************/
// Asynchronous tile QR factorization (see the doxygen block above).
// Errors are reported through sequence/request via plasma_request_fail.
//
// Fix vs. original: sequence and request are NULL-checked *before* being
// passed to plasma_request_fail in the descriptor checks — the original
// dereferenced a potentially NULL sequence when A or T was invalid.
void plasma_omp_dgeqrf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check sequence and request first: the checks below report failures
    // through them, so they must be valid before any use.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (imin(A.m, A.n) == 0)
        return;
    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pdgeqrf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_pdgeqrf(A, T, work, sequence, request);
    }
}
|
mpi-openmp.c | #include <stdio.h>
#include <mpi.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>
#include <sys/time.h>
#define MASTER 0
int pwork = 1;
int cwork[8];
struct timeval startwtime, endwtime;
double arr_time;
FILE* out;
int main(int argc, char** argv)
{
if (argc != 3) {
printf("Usage: %s <dim> <numThreads>\n", argv[0]);
exit(-1);
}
/* start MPI Process */
MPI_Init(&argc, &argv);
int rank, numnodes;
/* get the id (rank) */
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* Get the total number of threads */
MPI_Comm_size(MPI_COMM_WORLD, &numnodes);
int dim = atoi(argv[1]);
int numThreads = atoi(argv[2]);
int* data = (int*) malloc(dim * sizeof(int));
double* roots = (double*) malloc(dim * sizeof(double));
omp_set_num_threads(numThreads); // OpenMP call to set threads per rank
/* init data for all threads */
#pragma omp parallel
for (int i = 0; i < dim; i++) {
data[i] = i;
}
if (rank == MASTER) {
out = fopen("data.out", "w");
gettimeofday(&startwtime, NULL);
}
/* Producer */
if (rank == MASTER) {
#pragma omp single
for (int idx = 1; idx < dim; idx++) {
/* wait for a worker to become available */
MPI_Status status;
double root = 0;
MPI_Recv(&root, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
/* if a root was computed */
if (status.MPI_TAG > 0) {
roots[status.MPI_TAG] = root;
}
pwork++;
MPI_Send(&(data[idx]), 1, MPI_INT, status.MPI_SOURCE, idx, MPI_COMM_WORLD);
}
/* send termination signal to each rank when they submit their last job */
#pragma omp single
for (int idx = 0; idx < numnodes-1; idx++) {
/* wait for a worker to become available */
MPI_Status status;
double root = 0;
MPI_Recv(&root, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
/* if a root was computed */
if (status.MPI_TAG > 0) {
roots[status.MPI_TAG] = root;
}
/* send termination signal (tag = 0) */
MPI_Send(&idx, 1, MPI_INT, status.MPI_SOURCE, 0, MPI_COMM_WORLD);
}
} else { /* consumer */
/* announce myself to producer */
double root = 0;
MPI_Send(&root, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
bool terminated = false;
#pragma omp single
do {
/* wait for a job */
int num = 0;
MPI_Status status;
MPI_Recv(&num, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
if (!status.MPI_TAG) {
terminated = true;
} else {
cwork[rank] += 1;
root = sqrt(num);
MPI_Send(&root, 1, MPI_DOUBLE, 0, status.MPI_TAG, MPI_COMM_WORLD);
}
} while (!terminated);
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == MASTER) {
gettimeofday(&endwtime, NULL);
arr_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
printf("Time taken = %f\n", arr_time);
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == MASTER) {
for (int i = 0; i < dim; i++) {
fprintf(out, "sqrt(%i) = %f\n", data[i], roots[i]);
}
printf("work done by producer: %d\n", pwork);
printf("work done by consumers:\n");
}
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 1; i < numnodes; i++) {
if (rank == i) {
printf("%d\n", cwork[i]);
}
}
MPI_Finalize();
} |
GB_unop__identity_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_uint64)
// op(A') function: GB (_unop_tran__identity_int32_uint64)
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) Ax [p] for every entry, in parallel. When Ab is
// non-NULL, A is bitmap and only positions with Ab [p] != 0 are copied.
// Cx and Ax may be aliased (element-wise copy is safe in place).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__identity_int32_uint64)
(
int32_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every position holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
int32_t z = (int32_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int32_t) A': transpose with typecast. The entire algorithm lives
// in the included template GB_unop_transpose.c, which is specialized by
// the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_int32_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
NDArray.h | #ifndef NDARRAY_H
#define NDARRAY_H
#include <initializer_list>
#include <functional>
#include <shape.h>
#include "NativeOpExcutioner.h"
#include <memory/Workspace.h>
#include <indexing/NDIndex.h>
#include <indexing/IndicesList.h>
#include <graph/Intervals.h>
#include <array/DataType.h>
#include <stdint.h>
namespace nd4j {
template<typename T> class ND4J_EXPORT NDArray;
ND4J_EXPORT NDArray<float> operator-(const float, const NDArray<float>&);
ND4J_EXPORT NDArray<float16> operator-(const float16, const NDArray<float16>&);
ND4J_EXPORT NDArray<double> operator-(const double, const NDArray<double>&);
ND4J_EXPORT NDArray<float> operator+(const float, const NDArray<float>&);
ND4J_EXPORT NDArray<float16> operator+(const float16, const NDArray<float16>&);
ND4J_EXPORT NDArray<double> operator+(const double, const NDArray<double>&);
template<typename T> NDArray<T> mmul(const NDArray<T>&, const NDArray<T>&);
/**
* NDArray - dense n-dimensional array with elements of type T; the data lives in
* the flat _buffer, its layout (rank, shape, strides, element-wise stride, order)
* is described by _shapeInfo, and memory may optionally come from a workspace.
* This is the declaration of the public API only; implementations live elsewhere.
*/
template<typename T>
class NDArray {
protected:
/**
* if true then array doesn't own buffer and simply points to another's buffer
*/
bool _isView = false;
/**
* pointer on flattened data array in memory
*/
T *_buffer = nullptr;
/**
* contains shape info: matrix rank, numbers of elements per each dimension, dimensions strides, element-wise-stride, c-like or fortran-like order
*/
Nd4jLong *_shapeInfo = nullptr;
/**
* pointer on externally allocated memory where _buffer and _shapeInfo are stored
*/
nd4j::memory::Workspace* _workspace = nullptr;
/**
* alternative buffers for special computational devices (like GPUs for CUDA)
*/
T* _bufferD = nullptr;
Nd4jLong *_shapeInfoD = nullptr;
/**
* indicates whether user allocates memory for _buffer/_shapeInfo by himself, in opposite case the memory must be allocated from outside
*/
bool _isShapeAlloc = false;
bool _isBuffAlloc = false;
/**
* type of array elements
*/
DataType _dataType = DataType_FLOAT;
std::string toStringValue(T value);
public:
/**
* default constructor, do not allocate memory, memory for array is passed from outside
*/
NDArray(T *buffer = nullptr, Nd4jLong* shapeInfo = nullptr, nd4j::memory::Workspace* workspace = nullptr);
NDArray(std::initializer_list<Nd4jLong> shape, nd4j::memory::Workspace* workspace = nullptr);
/**
* Constructor for scalar NDArray
*/
NDArray(T scalar);
/**
* copy constructor
*/
NDArray(const NDArray<T>& other);
/**
* move constructor
*/
NDArray(NDArray<T>&& other) noexcept;
#ifndef __JAVACPP_HACK__
// this method only available out of javacpp
/**
* This constructor creates vector of T
*
* @param values
*/
NDArray(std::initializer_list<T> values, nd4j::memory::Workspace* workspace = nullptr);
NDArray(std::vector<T> &values, nd4j::memory::Workspace* workspace = nullptr);
#endif
/**
* constructor, create empty array stored at given workspace
*/
NDArray(nd4j::memory::Workspace* workspace);
/**
* this constructor creates new NDArray with shape matching "other" array, do not copy "other" elements into new array
*/
NDArray(const NDArray<T> *other, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr);
/**
* constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to be zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently
*/
NDArray(const Nd4jLong* shapeInfo, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr);
/**
* this constructor creates new array using shape information contained in vector argument
*/
NDArray(const char order, const std::vector<Nd4jLong> &shape, nd4j::memory::Workspace* workspace = nullptr);
/**
* This constructor creates new array with elements copied from data and using shape information stored in shape
*
* PLEASE NOTE: data will be copied AS IS, without respect to specified order. You must ensure order match here.
*/
NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::memory::Workspace* workspace = nullptr);
/**
* this constructor creates new array using given buffer (without memory allocating) and shape information stored in shape
*/
NDArray(T *buffer, const char order, const std::vector<Nd4jLong> &shape , nd4j::memory::Workspace* workspace = nullptr);
/**
* copy assignment operator
*/
NDArray<T>& operator=(const NDArray<T>& other);
/**
* move assignment operator
*/
NDArray<T>& operator=(NDArray<T>&& other) noexcept;
/**
* assignment operator, assigns the same scalar to all array elements
*/
NDArray<T>& operator=(const T scalar);
/**
* operators for memory allocation and deletion
*/
void* operator new(size_t i);
void operator delete(void* p);
/**
* method replaces existing buffer/shapeinfo, AND releases original pointers (if releaseExisting TRUE)
*/
void replacePointers(T *buffer, Nd4jLong *shapeInfo, const bool releaseExisting = true);
/**
* create a new array by replicating current array by repeats times along given dimension
* dimension - dimension along which to repeat elements
* repeats - number of repetitions
*/
NDArray<T>* repeat(int dimension, const std::vector<Nd4jLong>& repeats) const;
/**
* fill target array by repeating current array
* dimension - dimension along which to repeat elements
*/
void repeat(int dimension, NDArray<T>& target) const;
/**
* return _dataType;
*/
DataType dataType() const;
/**
* creates array which is view of this array
*/
NDArray<T>* getView();
/**
* creates array which points on certain sub-range of this array, sub-range is defined by given indices
*/
NDArray<T> *subarray(IndicesList& indices) const;
NDArray<T> *subarray(IndicesList& indices, std::vector<Nd4jLong>& strides) const;
NDArray<T>* subarray(const std::initializer_list<NDIndex*>& idx) const;
NDArray<T>* subarray(const Intervals& idx) const;
/**
* cast array elements to given dtype
*/
NDArray<T>* cast(DataType dtype);
void cast(NDArray<T>* target, DataType dtype);
/**
* returns _workspace
*/
nd4j::memory::Workspace* getWorkspace() const {
return _workspace;
}
/**
* returns _buffer
*/
T* getBuffer();
T* buffer();
/**
* returns _shapeInfo
*/
Nd4jLong* shapeInfo();
Nd4jLong* getShapeInfo() const;
/**
* if _bufferD==nullptr return _buffer, else return _bufferD
*/
T* specialBuffer();
/**
* if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD
*/
Nd4jLong* specialShapeInfo();
/**
* set values for _bufferD and _shapeInfoD
*/
void setSpecialBuffers(T * buffer, Nd4jLong *shape);
/**
* permutes (in-place) the dimensions in array according to "dimensions" array
*/
bool permutei(const std::initializer_list<int>& dimensions);
bool permutei(const std::vector<int>& dimensions);
bool permutei(const int* dimensions, const int rank);
bool permutei(const std::initializer_list<Nd4jLong>& dimensions);
bool permutei(const std::vector<Nd4jLong>& dimensions);
bool permutei(const Nd4jLong* dimensions, const int rank);
/**
* permutes the dimensions in array according to "dimensions" array, new array points on _buffer of this array
*/
NDArray<T>* permute(const std::initializer_list<int>& dimensions) const;
NDArray<T>* permute(const std::vector<int>& dimensions) const;
NDArray<T>* permute(const int* dimensions, const int rank) const;
void permute(const int* dimensions, const int rank, NDArray<T>& target) const;
void permute(const std::vector<int>& dimensions, NDArray<T>& target) const;
NDArray<T>* permute(const std::initializer_list<Nd4jLong>& dimensions) const;
NDArray<T>* permute(const std::vector<Nd4jLong>& dimensions) const;
NDArray<T>* permute(const Nd4jLong* dimensions, const int rank) const;
void permute(const Nd4jLong* dimensions, const int rank, NDArray<T>& target) const;
void permute(const std::vector<Nd4jLong>& dimensions, NDArray<T>& target) const;
/**
* This method streamlines given view or permuted array, and reallocates buffer
*/
void streamline(char order = 'a');
/**
* check whether array is contiguous in memory
*/
bool isContiguous();
/**
* prints information about array shape
* msg - message to print out
*/
void printShapeInfo(const char * msg = nullptr) const;
/**
* prints buffer elements
* msg - message to print out
* limit - number of array elements to print out
*/
void printBuffer(const char* msg = nullptr, Nd4jLong limit = -1);
/**
* prints buffer elements, takes into account offset between elements (element-wise-stride)
* msg - message to print out
* limit - number of array elements to print out
*/
void printIndexedBuffer(const char* msg = nullptr, Nd4jLong limit = -1) const;
std::string asIndexedString(Nd4jLong limit = -1);
std::string asString(Nd4jLong limit = -1);
/**
* this method assigns values of given array to this one
*/
void assign(const NDArray<T>* other);
/**
* this method assigns values of given array to this one
*/
void assign(const NDArray<T>& other);
/**
* this method assigns given value to all elements in array
*/
void assign(const T value);
/**
* returns new copy of this array, optionally in different order
*/
NDArray<T> *dup(const char newOrder = 'a');
/**
* returns sum of all elements of array
*/
T sumNumber() const;
/**
* returns mean number of array
*/
T meanNumber() const;
/**
* This method explicitly enforces new shape for this NDArray, old shape/stride information is lost
*/
void enforce(const std::initializer_list<Nd4jLong> &dimensions, char order = 'a');
void enforce(std::vector<Nd4jLong> &dimensions, char order = 'a');
/**
* calculates sum along dimension(s) in this array and save it to created reduced array
* dimensions - array of dimensions to calculate sum over
* keepDims - if true then put unities in place of reduced dimensions
*/
NDArray<T> *sum(const std::vector<int> &dimensions) const;
/**
* method reduces array by excluding its shapes along dimensions present in given dimensions vector, result is stored in new array to be returned
* dimensions - array of dimensions to reduce along
* keepDims - if true then put unities in place of reduced dimensions
*/
template<typename OpName>
NDArray<T>* reduceAlongDimension(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
template<typename OpName>
NDArray<T>* reduceAlongDimension(const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
template<typename OpName>
NDArray<T> reduceAlongDims(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const;
/**
* method reduces array by excluding its shapes along dimensions present in given dimensions vector
* target - where to save result of reducing
* dimensions - array of dimensions to reduce along
* keepDims - if true then put unities in place of reduced dimensions
* extras - extra parameters
*/
template<typename OpName>
void reduceAlongDimension(NDArray<T>* target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, T *extras = nullptr) const;
/**
* return variance of array elements set
* biasCorrected - if true bias correction will be applied
*/
template<typename OpName>
T varianceNumber(bool biasCorrected = true);
/**
* apply scalar operation to array
* extraParams - extra parameters for operation
*/
template<typename OpName>
T reduceNumber(T *extraParams = nullptr) const;
/**
* returns element index which corresponds to some condition imposed by operation
* extraParams - extra parameters for operation
*/
template<typename OpName>
Nd4jLong indexReduceNumber(T *extraParams = nullptr);
/**
* returns index of max element in a given array (optionally: along given dimension(s))
* dimensions - optional vector with dimensions
*/
Nd4jLong argMax(std::initializer_list<int> dimensions = {});
/**
* apply OpName transformation directly to array
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyTransform(T *extraParams = nullptr);
/**
* apply OpName transformation to array and store result in target
* target - where to store result
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyTransform(NDArray<T> *target, T *extraParams = nullptr);
/**
* apply OpName transformation to this array and store result in new array being returned
* extraParams - extra parameters for operation
*/
template<typename OpName>
NDArray<T> transform(T *extraParams = nullptr) const;
/**
* apply pairwise OpName transformation based on "this" and "other" arrays elements, store result in this array
* other - second array necessary for pairwise operation
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyPairwiseTransform(NDArray<T> *other, T *extraParams);
/**
* apply pairwise OpName transformation based on "this" and "other" arrays elements, store result in target array
* other - second array necessary for pairwise operation
* target - where to store result
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyPairwiseTransform(NDArray<T> *other, NDArray<T> *target, T *extraParams);
/**
* apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this)
* tad - array to broadcast
* dimensions - dimensions array to broadcast along
* target - where to store result
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyBroadcast(std::initializer_list<int> dimensions, const NDArray<T>* tad, NDArray<T>* target = nullptr, T* extraArgs = nullptr);
template <typename OpName>
void applyBroadcast(std::vector<int> &dimensions, const NDArray<T> *tad, NDArray<T> *target = nullptr, T *extraArgs = nullptr);
/**
* apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting
* other - input array
* extraParams - extra parameters for operation
*/
template <typename OpName>
NDArray<T> applyTrueBroadcast(const NDArray<T>& other, T *extraArgs = nullptr) const;
template <typename OpName>
NDArray<T>* applyTrueBroadcast(const NDArray<T>* other, T *extraArgs = nullptr) const;
/**
* apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting
* other - input array
* target - where to store result
* checkTargetShape - if true check whether target shape is suitable for broadcasting
* extraParams - extra parameters for operation
*/
template <typename OpName>
void applyTrueBroadcast(const NDArray<T>* other, NDArray<T>* target, const bool checkTargetShape = true, T *extraArgs = nullptr) const;
/**
* apply a scalar operation to an array
* scalar - input scalar
* target - where to store result
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyScalar(T scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const;
/**
* apply a scalar operation to an array
* scalar - input array which is simple scalar
* target - where to store result
* extraParams - extra parameters for operation
*/
template<typename OpName>
void applyScalar(NDArray<T>& scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const;
#ifndef __JAVACPP_HACK__
/**
* apply operation "func" to an array
* func - what operation to apply
* target - where to store result
*/
void applyLambda(const std::function<T(T)>& func, NDArray<T>* target = nullptr);
void applyIndexedLambda(const std::function<T(Nd4jLong, T)>& func, NDArray<T>* target = nullptr);
/**
* apply pairwise operation "func" to an array
* other - input array
* func - what pairwise operation to apply
* target - where to store result
*/
void applyPairwiseLambda(NDArray<T>* other, const std::function<T(T, T)>& func, NDArray<T>* target = nullptr);
void applyIndexedPairwiseLambda(NDArray<T>* other, const std::function<T(Nd4jLong, T, T)>& func, NDArray<T>* target = nullptr);
void applyTriplewiseLambda(NDArray<T>* second, NDArray<T> *third, const std::function<T(T, T, T)>& func, NDArray<T>* target = nullptr);
#endif
/**
* apply OpName random operation to array
* buffer - pointer on RandomBuffer
* y - optional input array
* z - optional input array
* extraArgs - extra parameters for operation
*/
template<typename OpName>
void applyRandom(nd4j::random::RandomBuffer *buffer, NDArray<T>* y = nullptr, NDArray<T>* z = nullptr, T* extraArgs = nullptr);
/**
* apply transpose operation to the copy of this array, that is this array remains unaffected
*/
NDArray<T> *transpose() const;
/**
* perform transpose operation and store result in target, this array remains unaffected
* target - where to store result
*/
void transpose(NDArray<T>& target) const;
/**
* apply in-place transpose operation to this array, so this array becomes transposed
*/
void transposei();
/**
* return array pointing on certain range of this array
* index - the number of array to be returned among set of possible arrays
* dimensions - array of dimensions to point on
*/
NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::initializer_list<int>& dimensions) const;
NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::vector<int>& dimensions) const;
/**
* returns the number of arrays pointing on specified dimension(s)
* dimensions - array of dimensions to point on
*/
Nd4jLong tensorsAlongDimension(const std::initializer_list<int> dimensions) const ;
Nd4jLong tensorsAlongDimension(const std::vector<int>& dimensions) const ;
/**
* returns true if elements of two arrays are equal to within given epsilon value
* other - input array to compare
* eps - epsilon, this value defines the precision of elements comparison
*/
bool equalsTo(const NDArray<T> *other, T eps = (T) 1e-5f) const;
bool equalsTo(NDArray<T> &other, T eps = (T) 1e-5f) const;
/**
* add given row vector to all rows of this array
* row - row vector to add
*/
void addiRowVector(const NDArray<T> *row);
/**
* add given row vector to all rows of this array, store result in target
* row - row vector to add
* target - where to store result
*/
void addRowVector(const NDArray<T> *row, NDArray<T>* target) const;
/**
* subtract given row vector from all rows of this array, store result in target
* row - row vector to subtract
* target - where to store result
*/
void subRowVector(const NDArray<T> *row, NDArray<T>* target) const;
/**
* multiply all rows of this array on given row vector, store result in target
* row - row vector to multiply on
* target - where to store result
*/
void mulRowVector(const NDArray<T> *row, NDArray<T>* target) const;
/**
* divide all rows of this array on given row vector, store result in target
* row - row vector to divide on
* target - where to store result
*/
void divRowVector(const NDArray<T> *row, NDArray<T>* target) const;
/**
* add given column vector to all columns of this array, store result in target
* column - column vector to add
* target - where to store result
*/
void addColumnVector(const NDArray<T> *column, NDArray<T>* target) const;
/**
* add given column vector to all columns of this array, this array becomes affected (in-place operation)
* column - column vector to add
*/
void addiColumnVector(const NDArray<T> *column);
/**
* multiply all columns of this array on given column vector, this array becomes affected (in-place operation)
* column - column vector to multiply on
*/
void muliColumnVector(const NDArray<T> *column);
/**
* returns number of bytes used by _buffer & _shapeInfo
*/
Nd4jLong memoryFootprint();
/**
* these methods suited for FlatBuffers use
*/
std::vector<T> getBufferAsVector();
std::vector<Nd4jLong> getShapeAsVector();
std::vector<Nd4jLong> getShapeInfoAsVector();
std::vector<int64_t> getShapeInfoAsFlatVector();
/**
* set new order and shape in case of suitable array length (in-place operation)
* order - order to set
* shape - shape to set
*
* if there was permute applied before or there are weird strides, then new buffer is allocated for array
*/
bool reshapei(const char order, const std::initializer_list<Nd4jLong>& shape);
bool reshapei(const char order, const std::vector<Nd4jLong>& shape);
bool reshapei(const std::initializer_list<Nd4jLong>& shape);
bool reshapei(const std::vector<Nd4jLong>& shape);
/**
* creates new array with corresponding order and shape, new array will point on _buffer of this array
* order - order to set
* shape - shape to set
*
* if permute has been applied before or there are weird strides, then new buffer is allocated for new array
*/
NDArray<T>* reshape(const char order, const std::vector<Nd4jLong>& shape) const;
/**
* calculate strides and set given order
* order - order to set
*/
void updateStrides(const char order);
/**
* change an array by repeating it the number of times given by reps (in-place operation)
* repeats - contains numbers of repetitions
*/
void tilei(const std::vector<Nd4jLong>& repeats);
/**
* returns new array which is created by repeating of this array the number of times given by reps
* repeats - contains numbers of repetitions
*/
NDArray<T> tile(const std::vector<Nd4jLong>& repeats) const;
/**
* change an array by repeating it the number of times given by reps (in-place operation)
* repeats - contains numbers of repetitions
* target - where to store result
*/
void tile(const std::vector<Nd4jLong>& repeats, NDArray<T>& target) const;
/**
* change an array by repeating it the number of times to acquire the new shape which is the same as target shape
* target - where to store result
*/
void tile(NDArray<T>& target) const;
/**
* returns an array which is result of broadcasting of this and other arrays
* other - input array
*/
NDArray<T>* broadcast(const NDArray<T>& other);
/**
* check whether array's rows (arg=0) or columns (arg=1) create orthogonal basis
* arg - 0 -> row, 1 -> column
*/
bool hasOrthonormalBasis(const int arg);
/**
* check whether array is identity matrix
*/
bool isIdentityMatrix();
/**
* check whether array is unitary matrix
*/
bool isUnitary();
/**
* reduces dimensions in this array relying on index operation OpName
* dimensions - vector of dimensions to reduce along
* extraArgs - extra parameters for operation
*/
template<typename OpName>
NDArray<T>* applyIndexReduce(const std::vector<int>& dimensions, const T *extraParams = nullptr) const;
/**
* reduces dimensions in array relying on index operation OpName
* target - where to store result
* dimensions - vector of dimensions to reduce along
* extraArgs - extra parameters for operation
*/
template<typename OpName>
void applyIndexReduce(const NDArray<T>* target, const std::vector<int>& dimensions, const T *extraParams = nullptr) const;
/**
* apply reduce3 operation OpName to this and other array, return result in new output array
* other - input array
* extraArgs - extra parameters for operation
*/
template<typename OpName>
NDArray<T>* applyReduce3(const NDArray<T>* other, const T* extraParams = nullptr) const;
/**
* apply reduce3 operation OpName to this and other array, return result in new output array
* other - input array
* dimensions - vector of dimensions to reduce along
* extraArgs - extra parameters for operation
*/
template<typename OpName>
NDArray<T>* applyAllReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const;
/**
* apply reduce3 (exec) operation OpName to this and other array, return result in new output array
* other - input array
* dimensions - vector of dimensions to reduce along
* extraArgs - extra parameters for operation
*/
template<typename OpName>
NDArray<T>* applyReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const;
/**
* returns variance along given dimensions
* biasCorrected - if true bias correction will be applied
* dimensions - vector of dimensions to calculate variance along
*/
template<typename OpName>
NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::vector<int>& dimensions) const;
template<typename OpName>
NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::initializer_list<int>& dimensions) const;
template<typename OpName>
void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::vector<int>& dimensions);
template<typename OpName>
void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::initializer_list<int>& dimensions);
/**
* operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals
* idx - intervals of indexes which define the sub-arrays to point on
* keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b}
*/
NDArray<T> operator()(const Intervals& idx, bool keepUnitiesInShape = false) const;
/**
* operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals
* idx - intervals of indexes which define the sub-arrays to point on, idx has form {dim0Start,dim0End, dim1Start,dim1End, ....} and length (2 * this->rankOf())
* when (dimStart == dimEnd) then whole range will be used for current dimension
* keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b}
*/
NDArray<T> operator()(const int* idx, bool keepUnitiesInShape = false) const;
/**
* addition operator: array + other
* other - input array to add
*/
NDArray<T> operator+(const NDArray<T>& other) const;
/**
* addition operator: array + scalar
* scalar - input scalar to add
*/
NDArray<T> operator+(const T scalar) const;
/**
* friend functions which implement addition operator: scalar + array
* scalar - input scalar to add
*/
friend NDArray<float> nd4j::operator+(const float scalar, const NDArray<float>& arr);
friend NDArray<float16> nd4j::operator+(const float16 scalar, const NDArray<float16>& arr);
friend NDArray<double> nd4j::operator+(const double scalar, const NDArray<double>& arr);
/**
* addition unary operator array += other
* other - input array to add
*/
void operator+=(const NDArray<T>& other);
/**
* subtraction unary operator array -= other
* other - input array to subtract
*/
void operator-=(const NDArray<T>& other);
void operator+=(const T other);
void operator-=(const T other);
/**
* subtraction operator: array - other
* other - input array to subtract
*/
NDArray<T> operator-(const NDArray<T>& other) const;
/**
* subtraction operator: array - scalar
* scalar - input scalar to subtract
*/
NDArray<T> operator-(const T& scalar) const;
/**
* negative operator, it changes sign of all array elements to the opposite
*/
NDArray<T> operator-() const;
/**
* friend functions which implement subtraction operator: scalar - array
* scalar - input scalar to subtract
*/
friend NDArray<float> nd4j::operator-(const float scalar, const NDArray<float>& arr);
friend NDArray<float16> nd4j::operator-(const float16 scalar, const NDArray<float16>& arr);
friend NDArray<double> nd4j::operator-(const double scalar, const NDArray<double>& arr);
/**
* pairwise multiplication operator: array * other
* other - input array to multiply on
*/
NDArray<T> operator*(const NDArray<T>& other) const;
/**
* multiplication operator: array * scalar
* scalar - input scalar to multiply on
*/
NDArray<T> operator*(const T scalar) const;
/**
* pairwise multiplication unary operator array *= other
* other - input array to multiply on
*/
void operator*=(const NDArray<T>& other);
/**
* multiplication unary operator array *= scalar
* scalar - input scalar to multiply on
*/
void operator*=(const T scalar);
/**
* pairwise division operator: array / other
* other - input array to divide on
*/
NDArray<T> operator/(const NDArray<T>& other) const;
/**
* division operator: array / scalar
* scalar - input scalar to divide each array element on
*/
NDArray<T> operator/(const T scalar) const;
/**
* pairwise division unary operator: array /= other
* other - input array to divide on
*/
void operator/=(const NDArray<T>& other);
/**
* division unary operator: array /= scalar
* scalar - input scalar to divide on
*/
void operator/=(const T scalar);
/**
* friend function which implements mathematical multiplication of two arrays
* left - input array
* right - input array
*/
friend NDArray<T> mmul<>(const NDArray<T>& left, const NDArray<T>& right);
/**
* this method assigns elements of other array to the sub-array of this array defined by given intervals
* other - input array to assign elements from
* idx - intervals of indexes which define the sub-array
*/
void assign(const NDArray<T>& other, const Intervals& idx);
/**
* return vector containing _buffer as flat binary array
*/
std::vector<int8_t> asByteVector();
/**
* makes array to be identity matrix (not necessarily square), that is set all diagonal elements = 1, rest = 0
*/
void setIdentity();
/**
* swaps the contents of two arrays,
* PLEASE NOTE: method doesn't take into account the shapes of arrays, shapes may be different except one condition: arrays lengths must be the same
*/
void swapUnsafe(NDArray<T>& other);
/**
* return vector with buffer which points on corresponding diagonal elements of array
* type - means of vector to be returned: column ('c') or row ('r')
*/
NDArray<T>* diagonal(const char type ) const;
/**
* fill matrix with given value starting from specified diagonal in given direction, works only with 2D matrix
*
* diag - diagonal from which matrix filling starts.
* diag = 0 corresponds to main diagonal,
* diag < 0 below main diagonal
* diag > 0 above main diagonal
* direction - in what direction to fill matrix. There are 2 possible directions:
* 'u' - fill up, mathematically this corresponds to lower triangular matrix
* 'l' - fill down, mathematically this corresponds to upper triangular matrix
*/
void setValueInDiagMatrix(const T& value, const int diag, const char direction);
/**
* change an array by repeating it the number of times in order to acquire new shape equal to the input shape
*
* shape - contains new shape to broadcast array to
* target - optional argument, if target != nullptr the resulting array will be placed it target, in opposite case tile operation is done in place
*/
void tileToShape(const std::vector<Nd4jLong>& shape, NDArray<T>* target = nullptr);
void tileToShape(const std::initializer_list<Nd4jLong>& shape, NDArray<T>* target = nullptr);
template <typename N>
NDArray<N>* asT();
/**
* calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...]
*/
T getTrace() const;
/**
* default destructor
*/
~NDArray() noexcept;
/**
* set _shapeInfo
*/
FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo);
/**
* set _buffer
*/
FORCEINLINE void setBuffer(T* buffer);
/**
* set _isBuffAlloc and _isShapeAlloc
*/
FORCEINLINE void triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated);
/**
* returns the value of "dim" dimension
*/
Nd4jLong sizeAt(int dim) const;
/**
* returns order of array
*/
FORCEINLINE char ordering() const;
/**
* return _isView
*/
FORCEINLINE bool isView();
/**
* returns shape portion of shapeInfo
*/
FORCEINLINE Nd4jLong* shapeOf() const;
/**
* returns strides portion of shapeInfo
*/
FORCEINLINE Nd4jLong* stridesOf() const;
/**
* returns rank of array
*/
FORCEINLINE int rankOf() const;
/**
* returns length of array
*/
FORCEINLINE Nd4jLong lengthOf() const;
/**
* returns number of rows in array
*/
FORCEINLINE Nd4jLong rows() const;
/**
* returns number of columns in array
*/
FORCEINLINE Nd4jLong columns() const;
/**
* returns size of array elements type
*/
FORCEINLINE int sizeOfT() const;
/**
* returns element-wise-stride
*/
FORCEINLINE Nd4jLong ews() const;
// returns true if arrays have same shape
FORCEINLINE bool isSameShape(const NDArray<T> *other) const;
FORCEINLINE bool isSameShape(NDArray<T> &other) const;
FORCEINLINE bool isSameShape(const std::initializer_list<Nd4jLong>& shape) const;
FORCEINLINE bool isSameShape(const std::vector<Nd4jLong>& shape) const;
/**
* returns true if these two NDArrays have same rank, dimensions, strides, ews and order
*/
FORCEINLINE bool isSameShapeStrict(const NDArray<T> *other) const;
/**
* returns true if buffer && shapeInfo were defined (non nullptr)
*/
FORCEINLINE bool nonNull() const;
/**
* returns array element with given index from linear buffer
* i - element index in array
*/
FORCEINLINE T getScalar(const Nd4jLong i) const;
/**
* returns array element with given index, takes into account offset between elements (element-wise-stride)
* i - element index in array
*/
FORCEINLINE T getIndexedScalar(const Nd4jLong i) const;
/**
* returns element with given indexes from 2D array
* i - number of row
* j - number of column
*/
FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j) const;
/**
* returns element with given indexes from 3D array
* i - height
* j - width
* k - depth
*/
FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const;
/**
* assigns given scalar to array element by given index, takes into account offset between elements (element-wise-stride)
* i - element index in array
* value - scalar value to assign
*/
FORCEINLINE void putIndexedScalar(const Nd4jLong i, const T value);
/**
* assigns given scalar to array element by given index, regards array buffer as linear
* i - element index in array
* value - scalar value to assign
*/
FORCEINLINE void putScalar(const Nd4jLong i, const T value);
/**
* assigns given scalar to 2D array element by given indexes
* i - number of row
* j - number of column
* value - scalar value to assign
*/
FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const T value);
/**
* assigns given scalar to 3D array element by given indexes
* i - height
* j - width
* k - depth
* value - scalar value to assign
*/
FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value);
/**
* returns true if array is 2D
*/
FORCEINLINE bool isMatrix() const;
/**
* returns true if array is vector
*/
FORCEINLINE bool isVector() const;
/**
* returns true if array is column vector
*/
FORCEINLINE bool isColumnVector() const;
/**
* returns true if array is row vector
*/
FORCEINLINE bool isRowVector() const;
/**
* returns true if array is scalar
*/
FORCEINLINE bool isScalar() const;
/**
* inline accessing operator for matrix, i - absolute index
*/
FORCEINLINE T operator()(const Nd4jLong i) const;
/**
* inline modifying operator for matrix, i - absolute index
*/
FORCEINLINE T& operator()(const Nd4jLong i);
/**
* inline accessing operator for 2D array, i - row, j - column
*/
FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j) const;
/**
* inline modifying operator for 2D array, i - row, j - column
*/
FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j);
/**
* inline accessing operator for 3D array, i - height, j - width, k - depth
*/
FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const;
/**
* inline modifying operator for 3D array, i - height, j - width, k - depth
*/
FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k);
/**
* inline modifying operator for 4D array, t/u/v/w - indexes along the four dimensions
*/
FORCEINLINE T& operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w);
/**
* inline accessing operator for 4D array, t/u/v/w - indexes along the four dimensions
*/
FORCEINLINE T operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const;
template <typename T2>
FORCEINLINE std::vector<T2> asVectorT();
FORCEINLINE bool isAttached();
NDArray<T>* detach();
};
//////////////////////////////////////////////////////////////////////////
///// IMPLEMENTATION OF INLINE METHODS /////
//////////////////////////////////////////////////////////////////////////
template <typename T>
template <typename T2>
std::vector<T2> NDArray<T>::asVectorT() {
    // Copies every element of this array into a std::vector, casting each value
    // to T2. The result has exactly lengthOf() entries, in the indexed order
    // produced by getIndexedScalar().
    std::vector<T2> result(this->lengthOf());
    // Use Nd4jLong (not int) as the loop counter so arrays with more than
    // INT_MAX elements do not overflow the index; OpenMP accepts any signed
    // integer type as the canonical loop variable.
    #pragma omp parallel for simd
    for (Nd4jLong e = 0; e < this->lengthOf(); e++)
        result[e] = (T2) this->getIndexedScalar(e);
    return result;
}
template<typename T>
bool NDArray<T>::isAttached() {
    // An array is "attached" when it lives inside an externally managed workspace.
    return nullptr != this->_workspace;
}
//////////////////////////////////////////////////////////////////////////
// Replaces the shape descriptor with an externally owned one.
// The previous descriptor is freed only if this array allocated it itself and
// it does not live in a workspace; ownership of the new pointer stays with the
// caller, hence _isShapeAlloc is cleared.
template<typename T>
void NDArray<T>::setShapeInfo(Nd4jLong *shapeInfo) {
if(_isShapeAlloc && _workspace == nullptr)
delete []_shapeInfo;
_shapeInfo = shapeInfo;
_isShapeAlloc = false;
}
//////////////////////////////////////////////////////////////////////////
// Replaces the data buffer with an externally owned one.
// The previous buffer is freed only if this array allocated it itself and it
// does not live in a workspace; ownership of the new pointer stays with the
// caller, hence _isBuffAlloc is cleared.
template<typename T>
void NDArray<T>::setBuffer(T* buffer) {
if(_isBuffAlloc && _workspace == nullptr)
delete []_buffer;
_buffer = buffer;
_isBuffAlloc = false;
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void NDArray<T>::triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated) {
    // Records whether this instance owns (and therefore must free) its data
    // buffer and its shape descriptor.
    this->_isBuffAlloc  = bufferAllocated;
    this->_isShapeAlloc = shapeAllocated;
}
//////////////////////////////////////////////////////////////////////////
// Memory layout order of the array: 'c' (row-major) or 'f' (column-major).
template<typename T>
char NDArray<T>::ordering() const {
return shape::order(_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// True when this array is a view over another array's buffer.
template<typename T>
bool NDArray<T>::isView() {
return _isView;
}
//////////////////////////////////////////////////////////////////////////
// Pointer to the dimension sizes stored inside the shape descriptor.
template<typename T>
Nd4jLong* NDArray<T>::shapeOf() const {
return shape::shapeOf(_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// Pointer to the per-dimension strides stored inside the shape descriptor.
template<typename T>
Nd4jLong* NDArray<T>::stridesOf() const {
return shape::stride(_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// Number of dimensions of the array.
template<typename T>
int NDArray<T>::rankOf() const {
return shape::rank(_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// Total number of elements.
template<typename T>
Nd4jLong NDArray<T>::lengthOf() const {
return shape::length(_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// Size of the first dimension (rows for a 2D array).
template<typename T>
Nd4jLong NDArray<T>::rows() const {
return shapeOf()[0];
}
//////////////////////////////////////////////////////////////////////////
// Size of the second dimension (columns for a 2D array).
template<typename T>
Nd4jLong NDArray<T>::columns() const {
return shapeOf()[1];
}
//////////////////////////////////////////////////////////////////////////
// Size in bytes of a single element.
template<typename T>
int NDArray<T>::sizeOfT() const {
return sizeof(T);
}
//////////////////////////////////////////////////////////////////////////
// Element-wise stride of the buffer (distance between consecutive elements).
template<typename T>
Nd4jLong NDArray<T>::ews() const {
return shape::elementWiseStride(_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// True when both the data buffer and the shape descriptor are set.
template<typename T>
bool NDArray<T>::nonNull() const {
return this->_buffer != nullptr && this->_shapeInfo != nullptr;
}
//////////////////////////////////////////////////////////////////////////
// True when the array is two-dimensional.
template<typename T>
bool NDArray<T>::isMatrix() const {
return shape::isMatrix(this->_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// True for vectors; scalars are explicitly excluded.
template<typename T>
bool NDArray<T>::isVector() const {
return !isScalar() && shape::isVector(this->_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// True for column vectors (n x 1); scalars are excluded.
template<typename T>
bool NDArray<T>::isColumnVector() const {
return !isScalar() && shape::isColumnVector(this->_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// True for row vectors (1 x n); any rank-1 array counts as a row vector.
template<typename T>
bool NDArray<T>::isRowVector() const {
// 1D edge case
if (shape::rank(this->_shapeInfo) == 1)
return true;
return !isScalar() && shape::isRowVector(this->_shapeInfo);
}
//////////////////////////////////////////////////////////////////////////
// True when the array holds exactly one element (rank-0 / 1x1 shape).
template<typename T>
bool NDArray<T>::isScalar() const {
return shape::isScalar(this->_shapeInfo);
}
// accessing operator for matrix, i - absolute index
template<typename T>
T NDArray<T>::operator()(const Nd4jLong i) const {
    // Reads element i of the array viewed as a flat sequence.
    // Throws std::invalid_argument when i is outside [0, lengthOf()).
    if (i >= shape::length(_shapeInfo))
        // fixed typo in the message ("dinput" -> "input") so it matches the
        // non-const overload
        throw std::invalid_argument("NDArray::operator(i): input index is out of array length !");
    auto ews = shape::elementWiseStride(_shapeInfo);
    char order = ordering();
    // Fast paths: contiguous / uniformly strided row-major storage.
    if(ews == 1 && order == 'c')
        return _buffer[i];
    else if(ews > 1 && order == 'c')
        return _buffer[i*ews];
    else {
        // General case: convert the linear index to per-dimension coordinates,
        // then apply the strides to find the buffer offset.
        Nd4jLong idx[MAX_RANK];
        shape::ind2subC(rankOf(), shapeOf(), i, idx);
        Nd4jLong offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf());
        return _buffer[offset];
    }
}
//////////////////////////////////////////////////////////////////////////
// modifying operator for matrix, i - absolute index
template<typename T>
T& NDArray<T>::operator()(const Nd4jLong i) {
    // Returns a mutable reference to element i of the flattened array.
    // Throws std::invalid_argument when i is outside [0, lengthOf()).
    if (i >= shape::length(_shapeInfo))
        throw std::invalid_argument("NDArray::operator(i): input index is out of array length !");
    const auto stride = shape::elementWiseStride(_shapeInfo);
    // Fast paths: row-major ('c') storage with a usable element-wise stride
    // maps the flat index straight onto the buffer.
    if (ordering() == 'c') {
        if (stride == 1)
            return _buffer[i];
        if (stride > 1)
            return _buffer[i * stride];
    }
    // Generic path: translate the flat index into per-dimension coordinates
    // and compute the strided buffer offset.
    Nd4jLong coords[MAX_RANK];
    shape::ind2subC(rankOf(), shapeOf(), i, coords);
    const auto offset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf());
    return _buffer[offset];
}
//////////////////////////////////////////////////////////////////////////
// accessing operator for 2D matrix, i - row, j - column
// Returns (by value) the element at row i, column j of a rank-2 array.
// Throws std::invalid_argument if the array is not rank 2 or an index is out
// of range. NOTE(review): negative indexes are not rejected here -- presumably
// callers guarantee non-negative values; confirm before relying on it.
template<typename T>
T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) const {
if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1])
throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !");
Nd4jLong coords[2] = {i, j};
auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf());
return _buffer[xOffset];
}
//////////////////////////////////////////////////////////////////////////
// modifying operator for 2D matrix, i - row, j - column
template<typename T>
T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) {
    // Mutable access to element (i, j); valid only for rank-2 arrays with
    // both indexes inside the shape.
    const bool badAccess = rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1];
    if (badAccess)
        throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !");
    Nd4jLong ind[2];
    ind[0] = i;
    ind[1] = j;
    return _buffer[shape::getOffset(0, shapeOf(), stridesOf(), ind, rankOf())];
}
//////////////////////////////////////////////////////////////////////////
// accessing operator for 3D array, i - height, j - width, k - depth
template<typename T>
T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const {
    // Reads element (i, j, k) of a rank-3 array.
    // BUG FIX: the third bound previously re-checked `j >= shapeOf()[2]`
    // instead of `k >= shapeOf()[2]` (compare the modifying overload), so an
    // out-of-range depth index slipped past validation and read out of bounds.
    if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2])
        throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !");
    Nd4jLong coords[3] = {i, j, k};
    auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf());
    return _buffer[xOffset];
}
//////////////////////////////////////////////////////////////////////////
// modifying operator for 3D array
template<typename T>
T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) {
    // Mutable access to element (i, j, k); valid only for rank-3 arrays with
    // all three indexes inside the shape.
    const bool badAccess = rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2];
    if (badAccess)
        throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !");
    Nd4jLong ind[3];
    ind[0] = i;
    ind[1] = j;
    ind[2] = k;
    return _buffer[shape::getOffset(0, shapeOf(), stridesOf(), ind, rankOf())];
}
// Returns (by value) the element at coordinates (t, u, v, w) of a rank-4 array.
// Throws std::invalid_argument if the array is not rank 4 or any index is out
// of range.
template<typename T>
T NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const {
if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3])
throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !");
Nd4jLong coords[4] = {t, u, v, w};
auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf());
return _buffer[xOffset];
}
// Mutable access to the element at coordinates (t, u, v, w) of a rank-4 array.
// Throws std::invalid_argument if the array is not rank 4 or any index is out
// of range.
template<typename T>
T& NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) {
if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3])
throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !");
Nd4jLong coords[4] = {t, u, v, w};
auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf());
return _buffer[xOffset];
}
//////////////////////////////////////////////////////////////////////////
// Return value from linear buffer
// Reads one element from the flat buffer; thin wrapper over operator()(i).
template<typename T>
T NDArray<T>::getScalar(const Nd4jLong i) const
{ return (*this)(i); }
//////////////////////////////////////////////////////////////////////////
// Same as getScalar(i); kept as a separate name for API symmetry with
// putIndexedScalar.
template<typename T>
T NDArray<T>::getIndexedScalar(const Nd4jLong i) const {
return (*this)(i);
}
//////////////////////////////////////////////////////////////////////////
// Returns value from 2D matrix by coordinates/indexes
template<typename T>
T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j) const
{ return (*this)(i, j); }
//////////////////////////////////////////////////////////////////////////
// returns value from 3D tensor by coordinates
template<typename T>
T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const
{ return (*this)(i, j, k); }
//////////////////////////////////////////////////////////////////////////
// Writes `value` at flat position i; thin wrapper over operator()(i).
template<typename T>
void NDArray<T>::putIndexedScalar(const Nd4jLong i, const T value)
{ (*this)(i) = value; }
//////////////////////////////////////////////////////////////////////////
// This method sets value in linear buffer to position i
template<typename T>
void NDArray<T>::putScalar(const Nd4jLong i, const T value)
{ (*this)(i) = value; }
//////////////////////////////////////////////////////////////////////////
// This method sets value in 2D matrix to position i, j
template<typename T>
void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const T value)
{ (*this)(i,j) = value; }
//////////////////////////////////////////////////////////////////////////
// This method sets value in 3D matrix to position i,j,k
template<typename T>
void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value)
{ (*this)(i,j,k) = value; }
//////////////////////////////////////////////////////////////////////////
template<typename T>
Nd4jLong NDArray<T>::memoryFootprint() {
    // Total bytes consumed: the data buffer plus the shape-info descriptor.
    const Nd4jLong bufferBytes = this->lengthOf() * this->sizeOfT();
    return bufferBytes + shape::shapeInfoByteLength(this->rankOf());
}
//////////////////////////////////////////////////////////////////////////
// returns true if these two NDArrays have same shape
// still the definition of inline function must be in header file
// Compares this array's shape against an explicit dimension list.
// A -1 entry in `other` acts as a wildcard that matches any size along that
// dimension. Ranks must match exactly.
template<typename T>
bool NDArray<T>::isSameShape(const std::vector<Nd4jLong>& other) const{
if (this->rankOf() != (int) other.size())
return false;
for (int e = 0; e < this->rankOf(); e++) {
if (this->shapeOf()[e] != other.at(e) && other.at(e) != -1)
return false;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// Shape comparison against another array: extracts the dimension sizes from
// other's shape descriptor (rank is stored at _shapeInfo[0], sizes follow).
template<typename T>
bool NDArray<T>::isSameShape(const NDArray<T> *other) const {
return isSameShape(std::vector<Nd4jLong>(other->_shapeInfo+1, other->_shapeInfo+1+other->_shapeInfo[0]));
}
//////////////////////////////////////////////////////////////////////////
// Reference-argument convenience overload; delegates to the pointer overload.
template<typename T>
bool NDArray<T>::isSameShape(NDArray<T> &other) const {
return isSameShape(&other);
}
//////////////////////////////////////////////////////////////////////////
// Initializer-list convenience overload; same wildcard semantics as above.
template<typename T>
bool NDArray<T>::isSameShape(const std::initializer_list<Nd4jLong>& other) const {
return isSameShape(std::vector<Nd4jLong>(other));
}
//////////////////////////////////////////////////////////////////////////
// returns true if these two NDArrays have same _shapeInfo
// still the definition of inline function must be in header file
// Unlike isSameShape, this compares the full descriptors (shape AND strides/
// order) with no wildcard handling.
template<typename T>
bool NDArray<T>::isSameShapeStrict(const NDArray<T> *other) const {
return shape::equalsStrict(_shapeInfo, other->_shapeInfo);
}
}
#endif
|
api_calls_misc.c | // RUN: %libomp-compile && %libomp-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
// OMPT API smoke test, validated by FileCheck against the CHECK lines below.
// Runs inside a single-thread parallel region so thread ids are stable.
int main() {
#pragma omp parallel num_threads(1)
{
// ompt_get_callback()
// Query the registered thread-begin callback and print both the expected
// function address and the queried one; FileCheck asserts they match.
ompt_callback_t callback;
ompt_get_callback(ompt_callback_thread_begin, &callback);
printf("%" PRIu64 ": &on_ompt_callback_thread_begin=%p\n",
ompt_get_thread_data()->value, &on_ompt_callback_thread_begin);
printf("%" PRIu64 ": ompt_get_callback() result=%p\n",
ompt_get_thread_data()->value, callback);
// ompt_get_state()
printf("%" PRIu64 ": ompt_get_state()=%d\n", ompt_get_thread_data()->value,
ompt_get_state(NULL));
// ompt_enumerate_states()
// Walk the state enumeration; the 1000-step cap guards against a
// non-terminating (buggy) runtime implementation.
int state = ompt_state_undefined;
const char *state_name;
int steps = 0;
while (ompt_enumerate_states(state, &state, &state_name) && steps < 1000) {
steps++;
if (!state_name)
printf("%" PRIu64 ": state_name is NULL\n",
ompt_get_thread_data()->value);
}
if (steps >= 1000) {
// enumeration did not end after 1000 steps
printf("%" PRIu64 ": states enumeration did not end\n",
ompt_get_thread_data()->value);
}
// ompt_enumerate_mutex_impls()
// Same pattern as the state enumeration, for mutex implementations.
int impl = ompt_mutex_impl_none;
const char *impl_name;
steps = 0;
while (ompt_enumerate_mutex_impls(impl, &impl, &impl_name) &&
steps < 1000) {
steps++;
if (!impl_name)
printf("%" PRIu64 ": impl_name is NULL\n",
ompt_get_thread_data()->value);
}
if (steps >= 1000) {
// enumeration did not end after 1000 steps
printf("%" PRIu64 ": mutex_impls enumeration did not end\n",
ompt_get_thread_data()->value);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: &on_ompt_callback_thread_begin
// CHECK-SAME: =[[FUNCTION_POINTER:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_get_callback() result=[[FUNCTION_POINTER]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_get_state()=1
// CHECK-NOT: {{^}}[[THREAD_ID]]: state_name is NULL
// CHECK-NOT: {{^}}[[THREAD_ID]]: states enumeration did not end
// CHECK-NOT: {{^}}[[THREAD_ID]]: impl_name is NULL
// CHECK-NOT: {{^}}[[THREAD_ID]]: mutex_impls enumeration did not end
return 0;
}
|
ompcompress.c | #ifdef _OPENMP
#if defined(WITH_IPP)
/*
* This source code file was modified with Intel(R) Integrated Performance Primitives library content
*/
#endif
/* compress 1d contiguous array in parallel */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* Compress a 1D contiguous field in parallel: 4-value blocks are grouped
into chunks, each chunk is encoded by one OpenMP thread into its own bit
stream, and the per-thread streams are concatenated at the end. */
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += x;
/* compress partial or full block (partial only at the array tail) */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), 1);
else
_t2(zfp_encode_block, Scalar, 1)(&s, p);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 1d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* Same chunked parallel scheme as the contiguous 1D variant, but honors the
field's x stride (sx defaults to 1 when unset). */
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
int sx = field->sx ? field->sx : 1;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += sx * (ptrdiff_t)x;
/* compress partial or full block */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), sx);
else
_t2(zfp_encode_block_strided, Scalar, 1)(&s, p, sx);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 2d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
/* 2D strided parallel compression: the block index is decomposed into
(x, y) block coordinates; blocks are 4x4, partial at the upper edges. */
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint blocks = bx * by;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y) within array */
const Scalar* p = data;
uint b = block;
uint x, y;
x = 4 * (b % bx); b /= bx;
y = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 2)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
else
_t2(zfp_encode_block_strided, Scalar, 2)(&s, p, sx, sy);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
#if defined(IPP_OPTIMIZATION_ENABLED) && !defined(_SET_TMP_BLOCK_FROM_)
#define _SET_TMP_BLOCK_FROM_
/* Expands a partial (edge) block into a full 4x4x4 serialized block for the
IPP encoder: positions beyond the valid sizeX/sizeY/sizeZ extent replicate
the last valid sample along that axis. pTmpBlock receives 64 values. */
static void CopyFromPartialBlock(const Ipp32f *pSrc, int stepY, int stepZ, int sizeX, int sizeY, int sizeZ, Ipp32f *pTmpBlock)
{
Ipp32f *pTmp;
int x, y, z, serIdx;
int copyX, copyY, copyZ;
for (serIdx = z = 0; z < 4; z++) {
/* clamp each coordinate to the last valid index (edge replication) */
copyZ = (z < sizeZ) ? z : sizeZ - 1;
for (y = 0; y < 4; y++) {
copyY = (y < sizeY) ? y : sizeY - 1;
pTmp = (Ipp32f*)pSrc + copyZ * stepZ + copyY * stepY;
for (x = 0; x < 4; x++) {
copyX = (x < sizeX) ? x : sizeX - 1;
pTmpBlock[serIdx++] = pTmp[copyX];
}
}
}
}
#endif
/* compress 3d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
/* 3D strided parallel compression. When IPP_OPTIMIZATION_ENABLED, the
non-reversible path encodes blocks with IPP per-thread encoder states
instead of the generic zfp kernels. */
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint blocks = bx * by * bz;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
/* bug fix: bail out on allocation failure, exactly as the 1D/2D/4D
variants do; previously a NULL bs was dereferenced below */
if (!bs)
return;
#if defined (IPP_OPTIMIZATION_ENABLED)
IppEncodeZfpState_32f* pStates = NULL;
Ipp64u* chunk_bit_lengths = (Ipp64u*)malloc(sizeof(Ipp64u)* chunks);
int srcBlockLineStep = nx * sizeof(Ipp32f);
int srcBlockPlaneStep = ny * srcBlockLineStep;
uint min_bits, max_bits, max_prec;
int min_exp;
int sizeState = 0;
if (!(REVERSIBLE(stream)))
{
zfp_stream_params(stream, &min_bits, &max_bits, &max_prec, &min_exp);
ippsEncodeZfpGetStateSize_32f(&sizeState);
pStates = (IppEncodeZfpState_32f*)ippsMalloc_8u(sizeState * threads);
}
#endif
/* compress chunks of blocks in parallel */
int chunk;
#if !defined (IPP_OPTIMIZATION_ENABLED)
#pragma omp parallel for num_threads(threads)
#else
#pragma omp parallel \
num_threads(threads)
{
/* per-thread IPP encoder state and scratch block for edge handling */
bitstream *pBitStream = NULL;
IppEncodeZfpState_32f* pState = NULL;
Ipp32f pTmpBlock[64];
if (!(REVERSIBLE(stream)))
{
pState = (IppEncodeZfpState_32f*)((Ipp8u*)pStates + omp_get_thread_num() * sizeState);
}
#pragma omp for
#endif
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
#if defined (IPP_OPTIMIZATION_ENABLED)
if (!(REVERSIBLE(stream)))
{
pBitStream = bs[chunk];
ippsEncodeZfpInitLong_32f((Ipp8u*)stream_data(pBitStream), stream_capacity(pBitStream), pState);
ippsEncodeZfpSet_32f(min_bits, max_bits, max_prec, min_exp, pState);
}
#endif
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z) within array */
const Scalar* p = data;
uint b = block;
uint x, y, z;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4)
{
#if !defined(IPP_OPTIMIZATION_ENABLED)
_t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
#else
if (!(REVERSIBLE(stream)))
{
/* IPP has no partial-block entry point: expand the edge block by
replication, then encode it as a full 4x4x4 block */
CopyFromPartialBlock((const Ipp32f *)p, sy, sz, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), pTmpBlock);
ippsEncodeZfp444_32f(pTmpBlock, 4 * sizeof(Ipp32f), 4 * 4 * sizeof(Ipp32f), pState);
}
else
{
_t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
}
#endif
}
else
{
#if !defined(IPP_OPTIMIZATION_ENABLED)
_t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
#else
if (!(REVERSIBLE(stream)))
{
ippsEncodeZfp444_32f((const Ipp32f *)p, srcBlockLineStep, srcBlockPlaneStep, pState);
}
else
{
_t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
}
#endif
}
}
#if defined (IPP_OPTIMIZATION_ENABLED)
if (!(REVERSIBLE(stream)) && pState != NULL)
{
/* record per-chunk compressed size and flush the IPP encoder */
Ipp64u chunk_compr_length;
ippsEncodeZfpGetCompressedBitSize_32f(pState, &chunk_bit_lengths[chunk]);
ippsEncodeZfpFlush_32f(pState);
chunk_compr_length = (size_t)((chunk_bit_lengths[chunk] + 7) >> 3);
stream_set_eos(pBitStream, chunk_compr_length);
}
#endif
}
#if defined (IPP_OPTIMIZATION_ENABLED)
}//The end of pragma omp parallel block
/* concatenate per-thread streams */
if (!(REVERSIBLE(stream)) && pStates != NULL)
{
compress_finish_par_opt(stream, bs, chunks, chunk_bit_lengths);
free(chunk_bit_lengths);
ippsFree(pStates);
return;
}
#endif
compress_finish_par(stream, bs, chunks);
}
/* compress 4d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
/* 4D strided parallel compression: block index decomposed into (x, y, z, w)
block coordinates; blocks are 4x4x4x4, partial at the upper edges. */
/* array metadata */
const Scalar* data = field->data;
uint nx = field->nx;
uint ny = field->ny;
uint nz = field->nz;
uint nw = field->nw;
int sx = field->sx ? field->sx : 1;
int sy = field->sy ? field->sy : (int)nx;
int sz = field->sz ? field->sz : (int)(nx * ny);
int sw = field->sw ? field->sw : (int)(nx * ny * nz);
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint bx = (nx + 3) / 4;
uint by = (ny + 3) / 4;
uint bz = (nz + 3) / 4;
uint bw = (nw + 3) / 4;
uint blocks = bx * by * bz * bw;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
if (!bs)
return;
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin (x, y, z, w) within array */
const Scalar* p = data;
uint b = block;
uint x, y, z, w;
x = 4 * (b % bx); b /= bx;
y = 4 * (b % by); b /= by;
z = 4 * (b % bz); b /= bz;
w = 4 * b;
p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
/* compress partial or full block */
if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 4)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
else
_t2(zfp_encode_block_strided, Scalar, 4)(&s, p, sx, sy, sz, sw);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Computes *result = *x - *y for `struct timeval` values.
     * NOTE: *y is normalized in place (the classic glibc-manual idiom), so the
     * caller must not rely on y's contents afterwards.
     * Returns 1 if the difference is negative, 0 otherwise. */
    if (x->tv_usec < y->tv_usec)
    {
        /* Borrow whole seconds into y so x->tv_usec - y->tv_usec >= 0. */
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        /* Shift excess whole seconds of microseconds back into y's seconds. */
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the difference is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x's (adjusted) seconds fall below y's. */
    return x->tv_sec < y->tv_sec;
}
// Benchmark driver: runs a tiled (PLUTO/CLooG-generated) order-1 3D 7-point
// stencil with variable coefficients TESTS times and reports the best time.
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
// NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when fewer than 4
// (resp. 5) command-line arguments are supplied -- presumably the benchmark
// is always invoked with all sizes; confirm with the run scripts.
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
// A: two time planes of a Nz x Ny x Nx grid, allocated row by row.
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// coef: one coefficient grid per stencil point (center + 6 neighbors).
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
// NOTE(review): the init loops start at index 1, so plane/row/column 0 is
// never written even though the stencil reads "-1" neighbors that can touch
// index 0 -- presumably intentional (uninitialized halo) for a timing-only
// benchmark; confirm if numerical output ever matters.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
// Run the kernel TESTS times and keep the minimum wall-clock time.
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
// Time-skewed, tiled loop nest generated by CLooG; t5 is the time step,
// (t6, t7, t8) index z/y/x after skewing by t5. Do not hand-edit bounds.
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32)),ceild(32*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t2+Nx,32),floord(Nt+Nx-4,32)),floord(2*t1+Nx+1,32)),floord(32*t3+Nx+28,32)),floord(4*t1-4*t2+Nz+Nx-1,32));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),32*t4+30),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
matmul.c | #include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <omp.h>
#ifndef _N_
#define _N_ 512
#endif
//#define PRINT_RESULT
int N = _N_;
int M = _N_;
int P = _N_;
/* Return the current wall-clock time in seconds since the epoch,
   with microsecond resolution (via gettimeofday). */
double my_timer ()
{
struct timeval now;
gettimeofday (&now, 0);
return (double) now.tv_sec + now.tv_usec / 1000000.0;
}
/* a = b * c on the accelerator with OpenACC.
   b is M x P, c is P x N, a is M x N, all row-major, using the
   file-scope dimensions M, N, P. b and c are copied to the device,
   a is copied back out. */
void
MatrixMultiplication_openacc(float * a,float * b, float * c)
{
int i, j, k ;
#pragma acc data copyout(a[0:(M*N)]), copyin(b[0:(M*P)],c[0:(P*N)])
{
/* Rows across gangs, columns across workers, inner product sequential. */
#pragma acc kernels loop independent gang
for (i=0; i<M; i++){
#pragma acc loop worker
for (j=0; j<N; j++)
{
float sum = 0.0 ;
#pragma acc loop seq
for (k=0; k<P; k++) {
sum += b[i*P+k]*c[k*N+j] ;
}
a[i*N+j] = sum ;
}
}
}
/* Optional spot-check of the first few results (host side, after copyout). */
#ifdef PRINT_RESULT
for (i=0; i<4; i++){
printf("a[%d] = %f\n", i, a[i]);
}
#endif
}
/* a = b * c on the host with OpenMP.
   b is M x P, c is P x N, a is M x N, all row-major, using the
   file-scope dimensions M, N, P. Rows of a are distributed across
   threads; each (i,j) entry is written by exactly one thread, so no
   synchronization is needed.
   Fix: removed the unused local `chunk` (computed as N/4 and listed in
   the shared() clause but never referenced by any schedule). */
void
MatrixMultiplication_openmp(float * a,float * b, float * c)
{
int i, j, k ;
#pragma omp parallel shared(a,b,c) private(i,j,k)
{
#ifdef _OPENMP
/* Report the team size once, from thread 0 only. */
if(omp_get_thread_num() == 0) {
printf("Number of OpenMP threads %d\n", omp_get_num_threads());
}
#endif
#pragma omp for
for (i=0; i<M; i++){
for (j=0; j<N; j++)
{
float sum = 0.0 ;
for (k=0; k<P; k++)
sum += b[i*P+k]*c[k*N+j] ;
a[i*N+j] = sum ;
}
}
}
}
/* Benchmark driver: allocates the operands, runs the OpenMP host
   version and then the OpenACC accelerator version of the same
   multiplication, timing each with my_timer().
   Initial data: a (result) zeroed, b = [0, 1, 2, ...], c = all ones.
   Fix: the three malloc results were used unchecked; on exhaustion we
   now report the failure and exit instead of dereferencing NULL. */
int main()
{
float *a, *b, *c;
int i;
double elapsed_time;
a = (float *) malloc(M*N*sizeof(float));
b = (float *) malloc(M*P*sizeof(float));
c = (float *) malloc(P*N*sizeof(float));
if (a == NULL || b == NULL || c == NULL) {
fprintf(stderr, "matmul: out of memory\n");
free(a);
free(b);
free(c);
return 1;
}
for (i = 0; i < M*N; i++) {
a[i] = (float) 0.0;
}
for (i = 0; i < M*P; i++) {
b[i] = (float) i;
}
for (i = 0; i < P*N; i++) {
c[i] = (float) 1.0;
}
/* Host (OpenMP) run. */
elapsed_time = my_timer();
MatrixMultiplication_openmp(a,b,c);
elapsed_time = my_timer() - elapsed_time;
printf("CPU Elapsed time = %lf sec\n", elapsed_time);
/* Accelerator (OpenACC) run; overwrites a with the same product. */
elapsed_time = my_timer();
MatrixMultiplication_openacc(a,b,c);
elapsed_time = my_timer() - elapsed_time;
printf("Accelerator Elapsed time = %lf sec\n", elapsed_time);
free(a);
free(b);
free(c);
return 0;
}
|
mafillvmain.c | /* CalculiX - A 3-dimensional finite element program */
/* Copyright (C) 1998-2015 Guido Dhondt */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as */
/* published by the Free Software Foundation(version 2); */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <pthread.h>
#include "CalculiX.h"
static char *lakonf1;
static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*nzs1,*ielfa1,*
ifabou1,*nbody1,*neq1,*nactdohinv1,*icyclic1,*ifatie1;
static double *auv1=NULL,*adv1=NULL,*bv1=NULL,*vfa1,*xxn1,*area1,*vel1,
*cosa1,*umfa1,*xlet1,*xle1,*gradvfa1,*xxi1,*body1,*volume1,*dtimef1,
*velo1,*veloo1,*sel1,*xrlfa1,*gamma1,*xxj1,*a11,*a21,*a31,*flux1,
*c1;
/* Multithreaded assembly of the CFD momentum-equation system (lhs and
   rhs) by splitting the elements across num_cpus pthreads. Each worker
   (mafillvmt) fills its own private copy of adv/auv/bv; the copies are
   accumulated into the caller's arrays afterwards with OpenMP.
   The thread count is resolved, in order of precedence, from
   NUMBER_OF_CPUS, the detected processor count, CCX_NPROC_CFD and
   OMP_NUM_THREADS, then capped by the number of elements *nef.
   Inputs are forwarded to the workers through the file-scope *1
   static pointers. */
void mafillvmain(ITG *nef,ITG *ipnei,ITG *neifa,ITG *neiel,
double *vfa,double *xxn,double *area,double *auv,double *adv,
ITG *jq,ITG *irow,ITG *nzs,double *bv,double *vel,double *cosa,
double *umfa,double *xlet,double *xle,double *gradvfa,
double *xxi,double *body,double *volume,
ITG *ielfa,char *lakonf,ITG *ifabou,ITG *nbody,ITG *neq,
double *dtimef,double *velo,double *veloo,
double *sel,double *xrlfa,double *gamma,double *xxj,
ITG *nactdohinv,double *a1,double *a2,double *a3,
double *flux,ITG *icyclic,double *c,ITG *ifatie){
ITG i,j;
/* variables for multithreading procedure */
ITG sys_cpus,*ithread=NULL;
char *env,*envloc,*envsys;
num_cpus = 0;
sys_cpus=0;
/* explicit user declaration prevails */
envsys=getenv("NUMBER_OF_CPUS");
if(envsys){
sys_cpus=atoi(envsys);
if(sys_cpus<0) sys_cpus=0;
}
/* automatic detection of available number of processors */
if(sys_cpus==0){
sys_cpus = getSystemCPUs();
if(sys_cpus<1) sys_cpus=1;
}
/* local declaration prevails, if strictly positive */
envloc = getenv("CCX_NPROC_CFD");
if(envloc){
num_cpus=atoi(envloc);
if(num_cpus<0){
num_cpus=0;
}else if(num_cpus>sys_cpus){
num_cpus=sys_cpus;
}
}
/* else global declaration, if any, applies */
env = getenv("OMP_NUM_THREADS");
if(num_cpus==0){
if (env)
num_cpus = atoi(env);
if (num_cpus < 1) {
num_cpus=1;
}else if(num_cpus>sys_cpus){
num_cpus=sys_cpus;
}
}
// next line is to be inserted in a similar way for all other paralell parts
if(*nef<num_cpus) num_cpus=*nef;
pthread_t tid[num_cpus];
/* allocating fields for lhs and rhs matrix */
/* one private copy per thread: adv is *neq long, auv 2**nzs, bv 3**neq;
   auv uses long long arithmetic to avoid 32-bit overflow of the size */
NNEW(adv1,double,num_cpus**neq);
NNEW(auv1,double,(long long)num_cpus*2**nzs);
NNEW(bv1,double,num_cpus*3**neq);
/* calculating the stiffness and/or mass matrix
(symmetric part) */
/* publish the arguments to the workers via the file-scope statics */
nef1=nef;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;vfa1=vfa;xxn1=xxn;
area1=area;jq1=jq;irow1=irow;nzs1=nzs;vel1=vel;cosa1=cosa;umfa1=umfa;
xlet1=xlet;xle1=xle;gradvfa1=gradvfa;xxi1=xxi;body1=body;volume1=volume;
ielfa1=ielfa;lakonf1=lakonf;ifabou1=ifabou;nbody1=nbody;neq1=neq;
dtimef1=dtimef;velo1=velo;veloo1=veloo;sel1=sel;xrlfa1=xrlfa;
gamma1=gamma;xxj1=xxj;nactdohinv1=nactdohinv;a11=a1;a21=a2;a31=a3;
flux1=flux;icyclic1=icyclic;c1=c;ifatie1=ifatie;
/* create threads and wait */
NNEW(ithread,ITG,num_cpus);
for(i=0; i<num_cpus; i++) {
ithread[i]=i;
pthread_create(&tid[i], NULL, (void *)mafillvmt, (void *)&ithread[i]);
}
for(i=0; i<num_cpus; i++) pthread_join(tid[i], NULL);
SFREE(ithread);
/* copying and accumulating the stiffnes and/or mass matrix */
/* reduce the per-thread copies: slot 0 is copied, slots 1..num_cpus-1
   are summed on top; each output index is touched by one OpenMP
   iteration only, so the three loops are race-free */
#pragma omp parallel \
default(none) \
shared(neq,adv,adv1,num_cpus,nzs,auv,auv1,bv,bv1) \
private(i,j)
{
#pragma omp for
for(i=0;i<*neq;i++){
adv[i]=adv1[i];
for(j=1;j<num_cpus;j++){
adv[i]+=adv1[i+j**neq];
}
}
#pragma omp for
for(i=0;i<2**nzs;i++){
auv[i]=auv1[i];
for(j=1;j<num_cpus;j++){
auv[i]+=auv1[i+(long long)j*2**nzs];
}
}
#pragma omp for
for(i=0;i<3**neq;i++){
bv[i]=bv1[i];
for(j=1;j<num_cpus;j++){
bv[i]+=bv1[i+j*3**neq];
}
}
}
SFREE(adv1);
SFREE(auv1);
SFREE(bv1);
return;
}
/* subroutine for multithreading of mafillv */
/* Worker thread body for mafillvmain: thread *i assembles the element
   range [nefa, nefb] (1-based, inclusive) into its own slice of the
   shared per-thread arrays adv1/auv1/bv1, then calls the Fortran
   routine mafillv. All other inputs come from the file-scope statics
   set by mafillvmain before the threads were created. */
void *mafillvmt(ITG *i){
ITG indexadv,indexbv,nefa,nefb,nefdelta;
long long indexauv;
/* offsets of this thread's private slice in adv1/auv1/bv1 */
indexadv=*i**neq1;
indexauv=(long long)*i*2**nzs1;
indexbv=*i*3**neq1;
// ceil -> floor
nefdelta=(ITG)floor(*nef1/(double)num_cpus);
nefa=*i*nefdelta+1;
nefb=(*i+1)*nefdelta;
// next line! -> all parallel sections
/* the last thread absorbs the remainder elements left by the floor */
if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;
FORTRAN(mafillv,(nef1,ipnei1,neifa1,neiel1,vfa1,xxn1,area1,
&auv1[indexauv],&adv1[indexadv],jq1,irow1,nzs1,&bv1[indexbv],
vel1,cosa1,umfa1,xlet1,xle1,gradvfa1,xxi1,
body1,volume1,ielfa1,lakonf1,ifabou1,nbody1,neq1,
dtimef1,velo1,veloo1,sel1,xrlfa1,gamma1,xxj1,nactdohinv1,a11,
a21,a31,flux1,&nefa,&nefb,icyclic1,c1,ifatie1));
return NULL;
}
|
GB_binop__max_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__max_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_fp64)
// A*D function (colscale): GB (_AxD__max_fp64)
// D*A function (rowscale): GB (_DxB__max_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__max_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__max_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_fp64)
// C=scalar+B GB (_bind1st__max_fp64)
// C=scalar+B' GB (_bind1st_tran__max_fp64)
// C=A+scalar GB (_bind2nd__max_fp64)
// C=A'+scalar GB (_bind2nd_tran__max_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = fmax (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmax (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_FP64 || GxB_NO_MAX_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// Generated body: the shared template specialized with
// GB_BINOP(z,x,y) = fmax(x,y). Not guarded by GB_DISABLE (void return).
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// Returns GrB_NO_VALUE when this specialization is compiled out so the
// caller falls back to the generic kernel.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Accumulate the sparse matrix B into the dense C using fmax,
// parallelized over the precomputed B_ek_slicing tasks.
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable -- the inner block already returned. Harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Column-scale A by the diagonal D: cij = fmax (aij, djj).
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Row-scale B by the diagonal D: cij = fmax (dii, bij).
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseAdd with op fmax; the *_ek_slicing workspaces are declared here
// for the template and released by GB_FREE_WORK before returning.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__max_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult (method 01) with op fmax over the intersection pattern.
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// fmax is commutative (GB_BINOP_FLIP is 0 for this file), so only the
// unflipped branch below is compiled; flipxy is effectively ignored.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult with a sparse/hyper mask M and bitmap/full A and B.
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult producing a bitmap C, with optional (complemented) mask M.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = fmax (x, Bx [p]) for every entry present per the bitmap Bb
// (Bb == NULL means all bnz entries are present).
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = fmax (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = fmax (Ax [p], y) for every entry present per the bitmap Ab
// (Ab == NULL means all anz entries are present).
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = fmax (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmax (x, aij) ; \
}
// C = fmax (x, A'): transpose A while applying the op with x bound first.
GrB_Info GB (_bind1st_tran__max_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmax (aij, y) ; \
}
// C = fmax (A', y): transpose A while applying the op with y bound second.
GrB_Info GB (_bind2nd_tran__max_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr81887.c | /* PR c/81887 */
/* { dg-do compile } */
/* { dg-options "-fno-openmp -fopenmp-simd -fdump-tree-gimple" } */
/* { dg-final { scan-tree-dump-times "#pragma omp simd" 2 "gimple" } } */
/* { dg-final { scan-tree-dump-times "#pragma omp ordered simd\[ \t]*\[\n\r]" 2 "gimple" } } */
/* { dg-final { scan-tree-dump-times "#pragma omp" 4 "gimple" } } */
void
f1 (int *x)
{
int i;
/* simd-only constructs: honored even under -fno-openmp -fopenmp-simd,
   so both pragmas must appear in the gimple dump (see dg-final above). */
#pragma omp simd
for (i = 0; i < 100; i++)
#pragma omp ordered simd
x[i / 2] = i;
}
void
f2 (int *x)
{
int i;
/* composite construct: with -fno-openmp only the simd parts survive,
   contributing the second "omp simd" / "ordered simd" pair to the dump. */
#pragma omp parallel for simd ordered
for (i = 0; i < 100; i++)
#pragma omp ordered threads simd
x[i / 2] = i;
}
void
f3 (int *x)
{
int i;
/* no simd clause: under -fno-openmp these pragmas are discarded entirely
   (they must not add to the "#pragma omp" count scanned above). */
#pragma omp parallel for ordered
for (i = 0; i < 100; i++)
#pragma omp ordered
x[i / 2] = i;
}
void
f4 (int *x)
{
int i;
#pragma omp parallel for ordered
for (i = 0; i < 100; i++)
#pragma omp ordered threads
x[i / 2] = i;
}
void
f5 (int n, int ***x)
{
int i, j, k;
/* doacross loop nest with depend(sink)/depend(source): non-simd, so it
   too is discarded under -fno-openmp -fopenmp-simd. */
#pragma omp parallel for ordered(3)
for (i=0; i < n; i++)
for (j=0; j < n; ++j)
for (k=0; k < n; ++k)
{
#pragma omp ordered depend(sink:i-8,j-2,k+2) depend(sink:i, j-1,k) depend(sink:i-4,j-3,k+6) depend(sink:i-6,j-4,k-6)
x[i][j][k] = i + j + k;
#pragma omp ordered depend(source)
}
}
|
encrypt.c | /************************************************************************************
File: encrypt.c
Generates the encrypted page
***********************************************************************************/
#include "pixmap.h"
#include <omp.h>
/*
 * In-place Hill-cipher step on one pair of bytes: the column vector
 * (*v1, *v2) is multiplied by the fixed 2x2 key matrix
 * [[21, 35], [18, 79]] and each component is reduced modulo 256.
 */
void encrypt (unsigned char *v1, unsigned char *v2)
{
	/* Snapshot both inputs before writing either output. */
	const int p1 = *v1;
	const int p2 = *v2;
	*v1 = (unsigned char)((21 * p1 + 35 * p2) % 256);
	*v2 = (unsigned char)((18 * p1 + 79 * p2) % 256);
}
/* Build the encrypted page: allocate a black page of the same size and
   encrypt the pixel data pairwise with the Hill cipher in encrypt().
   Each pair of bytes is independent, so the loop parallelizes with no
   shared writes. */
void generate_encrypted_page(page in_page, page *out_page)
{
generate_page(out_page,in_page.h,in_page.w,BLACK);
//////////////////////////////////////////////////////////////////////////////////////
/* TO COMPLETE: code to generate the encrypted page*/
//////////////////////////////////////////////////////////////////////////////////////
int n = (in_page.h*in_page.w);
/* NOTE(review): when n is odd the final byte is never copied and stays
   BLACK -- confirm that page sizes are guaranteed to be even. */
#pragma omp parallel for schedule(guided)
for(int i = 0; i < n-1; i+=2) {
out_page->dat[i] = in_page.dat[i];
out_page->dat[i+1] = in_page.dat[i+1];
encrypt(&out_page->dat[i], &out_page->dat[i+1]);
}
}
|
GB_unop__ainv_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_fc64_fc64)
// op(A') function: GB (_unop_tran__ainv_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_FC64_ainv (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC64_ainv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_FC64_ainv (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = GB_FC64_ainv (Ax [p]) (complex additive inverse) for every
// present entry; the two loops differ only in the bitmap test on Ab.
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_FC64_ainv (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_FC64_ainv (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ainv (A'): transpose A while applying the complex additive inverse.
GrB_Info GB (_unop_tran__ainv_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cc_fullycon.c | #include <stdio.h>
#include "cc_assert.h"
#include "cc_basic.h"
#include "cc_conv2d.h"
#include "cc_fmap2d.h"
#include "cc_tsrmgr.h"
#include "cc_fullycon.h"
#include "global_fn_cfg.h"
extern fn_array_dot_prod _array_dot_prod;
/* Fully-connected layer: for each of the w->shape[CC_CONV2D_KERNEL_O]
   output units, computes the dot product of the input channel vector
   with the corresponding row of w, then applies the optional bias b.
   The output tensor is 1 x 1 x O; it is fetched from the tensor manager
   by `name` when AUTO_TSRMGR is enabled, otherwise created fresh.
   Output units are independent, so the loop parallelizes cleanly.
   NOTE(review): the i * mmsize / i * dtsize offsets assume ->data is a
   byte pointer -- confirm against cc_tensor_t's declaration. */
cc_tensor_t *cc_fully_connected(const cc_tensor_t *inp,
const cc_tensor_t *w, const cc_tensor_t *b, const char *name)
{
cc_ssize i, mmsize, dtsize;
cc_tensor_t *oup = NULL;
cc_ssize shape[CC_CNN2D_SHAPE] = {0};
/* sanity checks: w must be a conv or fully-connected kernel, inp a 2-D
   feature map, all dtypes equal, and input channels match kernel input */
#ifdef ENABLE_CC_ASSERT
cc_assert((cc_dimension(w) == CC_CONV2D_KERNEL_DIM) ||
(cc_dimension(w) == CC_FULLYCON_KERNEL_DIM));
cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM);
cc_assert_zero(*inp->dtype - *w->dtype);
cc_assert_zero(*inp->dtype - *b->dtype);
cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C]
- w->shape[CC_CONV2D_KERNEL_I]);
#endif
#ifdef AUTO_TSRMGR
oup = cc_tsrmgr_get(name);
#endif
if (!oup) {
shape[CC_CNN2D_SHAPE_C] = w->shape[CC_CONV2D_KERNEL_O];
shape[CC_CNN2D_SHAPE_H] = 1;
shape[CC_CNN2D_SHAPE_W] = 1;
oup = cc_create(shape, *inp->dtype, name);
}
dtsize = cc_dtype_size(*inp->dtype);
/* mmsize: byte length of one input-channel vector (= one row of w) */
mmsize = inp->shape[CC_CNN2D_SHAPE_C] * dtsize;
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < w->shape[CC_CONV2D_KERNEL_O]; ++i) {
_array_dot_prod(inp->data, w->data + i * mmsize,
inp->shape[CC_CNN2D_SHAPE_C],
oup->data + i * dtsize, *inp->dtype);
}
if (b)
oup = cc_fmap2d_bias(oup, b, oup->name);
return oup;
}
|
helper.h | //
// Created by Lei Ma on 9/8/17.
//
#ifndef HALO_PARALLEL_HELPER_H
#define HALO_PARALLEL_HELPER_H
#include "initializer.h"
#include <ctime>
#include <omp.h> // for openmp
namespace Helper
{
    // Sign function: +1 for positive, -1 for negative, 0 for zero.
    // Fix: the previous val/std::abs(val) form divided 0 by 0 and returned
    // NaN for val == 0, and paid for a division. All functions here are
    // marked inline because this is a header: without it, including it
    // from more than one translation unit is an ODR violation.
    inline double sgnf(double val) {
        return (val > 0.0) - (val < 0.0);
    }
    // Reverse an array of state_type into the caller-provided array; Tested.
    inline void state_array_reverse( const state_type state [], state_type state_reversed [], const int length){
        for(int i=0; i< length; i++){
            for(int j=0; j < 3; j++) {
                state_reversed[i][j] = state[length -1 - i][j];
            }
        }
    }
    // Copy an array of state_type into the caller-provided array.
    inline void state_array_copy( const state_type state [], state_type state_copied [], const int length) {
        for(int i=0; i< length; i++){
            for(int j=0; j < 3; j++) {
                state_copied[i][j] = state[i][j];
            }
        }
    }
    // Swap two StateArray pointers (double-buffer flip).
    inline void sa_ptr_swap( StateArray* &stateptr, StateArray* &stateptr_swap) {
        StateArray *saptr_temp = stateptr;
        stateptr = stateptr_swap;
        stateptr_swap = saptr_temp;
    }
    // Blend statearray into statearrayavg with weight alpha, then rescale
    // each 3-vector by 1/(norm_avg * |v|) so it has unit length.
    // Fix: `ele` and `sumrecp` used to be declared before the parallel
    // loop, making them shared between OpenMP threads -- a data race.
    // They are now per-iteration locals, private to each thread.
    inline void state_avg( StateArray* statearrayavg, StateArray* statearray, const int length, double alpha = 0.5) {
        const double norm_avg = 1.0;
        #pragma omp parallel for
        for(int i=0; i< length; i++) {
            double sumsq = 0.0; // reset for every i, private per thread
            for(int j=0;j < 3;j++) {
                const double ele = (alpha) * (*statearray)[i][j] + (1 - alpha) * (*statearrayavg)[i][j];
                (*statearrayavg)[i][j] = ele;
                sumsq += ele*ele;
            }
            const double scale = 1/( norm_avg*std::sqrt(sumsq) );
            for(int j=0;j < 3;j++) {
                (*statearrayavg)[i][j] = (*statearrayavg)[i][j] * scale;
            }
        }
    }
    // Sum of squared differences of component 0, sampled every `savestep`
    // entries. Squaring (instead of std::abs) keeps the same ordering and
    // is far cheaper.
    inline double squared_difference( StateArray *current_state, StateArray *past_state, const int length, const int savestep = 1 ) {
        double sum = 0;
        for(int i = 0; i < length; i+=savestep) {
            const double d = (*current_state)[i][0] - (*past_state)[i][0];
            sum += d * d; // replaces pow(d, 2.0): one multiply, no libm call
        }
        return sum;
    }
}
// End of Helper namespace
// Timing helpers: a timestamp frozen at construction, a CPU-time
// stopwatch based on clock(), and a wall-clock stopwatch based on
// omp_get_wtime().
class Timing {
public:
    Timing() :rawtime{time(0)} {} // Constructor: capture the current time once
    // "YYYY-M-D-H-M-S" string for the construction time.
    // NOTE(review): localtime() uses a shared static buffer and is not
    // thread-safe; switch to localtime_r if ever called concurrently.
    string timestamp()
    {
        tm *ctm = localtime(&rawtime);
        int year = ctm->tm_year + 1900;
        int month = ctm->tm_mon+1;
        int day = ctm->tm_mday;
        int hour = ctm->tm_hour;
        int min = ctm->tm_min;
        int sec = ctm->tm_sec;
        return to_string(year) + "-" + to_string(month)+ "-"+to_string(day)+ "-" + to_string(hour) + "-"+ to_string(min) + "-" + to_string(sec);
    }
    // Human-readable construction time (includes ctime's trailing newline).
    string time_pretty()
    {
        return ctime (&rawtime);
    }
    // CPU-time stopwatch: returns the current tick count of clock().
    clock_t stopwatch_reset()
    {
        return clock();
    }
    // Print total CPU time since `stopwatch_begin_time` and the projected
    // time for `calc_iter` iterations, given NIterations were run.
    void stopwatch_info(const clock_t stopwatch_begin_time, const int NIterations, const double calc_iter=1000.0)
    {
        const clock_t stopwatch_end = clock();
        cout << "Total clock time: " << float( stopwatch_end - stopwatch_begin_time ) / CLOCKS_PER_SEC << endl;
        cout << "Clock time for 1000 iterations: " << calc_iter * float( stopwatch_end - stopwatch_begin_time ) / CLOCKS_PER_SEC / NIterations << endl;
    }
    // Wall-clock stopwatch, in seconds.
    // Fix: omp_get_wtime() returns seconds as a double; the previous
    // clock_t return/parameter types truncated it to whole seconds, so
    // any sub-second interval was reported as zero. Old callers holding
    // clock_t values still convert implicitly.
    double wall_stopwatch_reset()
    {
        return omp_get_wtime();
    }
    void wall_stopwatch_info(const double stopwatch_begin_time, const int NIterations, const double calc_iter=1000.0)
    {
        const double stopwatch_end = omp_get_wtime();
        cout << "Total clock time: " << ( stopwatch_end - stopwatch_begin_time ) << endl;
        cout << "Clock time for 1000 iterations: " << calc_iter * ( stopwatch_end - stopwatch_begin_time ) / NIterations << endl;
    }
private:
    time_t rawtime; // set once in the constructor; all methods read it
};
#endif
|
displacement_residual_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement (for penalty contact)
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementResidualContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of DisplacementResidualContactCriteria
    KRATOS_CLASS_POINTER_DEFINITION( DisplacementResidualContactCriteria );

    /// The base class definition (and it subclasses)
    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
    typedef typename BaseType::TDataType                     TDataType;
    typedef typename BaseType::DofsArrayType                 DofsArrayType;
    typedef typename BaseType::TSystemMatrixType             TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType             TSystemVectorType;

    /// The sparse space used
    typedef TSparseSpace SparseSpaceType;

    /// The table stream definition TODO: Replace by logger
    typedef TableStreamUtility::Pointer TablePrinterPointerType;

    /// The index type definition
    typedef std::size_t IndexType;

    /// The key type definition
    typedef std::size_t KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor (direct tolerances)
     * @param DispRatioTolerance Relative tolerance for displacement residual error
     * @param DispAbsTolerance Absolute tolerance for displacement residual error
     * @param PrintingOutput If the output is going to be printed in a txt file (plain text, no ANSI colors)
     */
    explicit DisplacementResidualContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const bool PrintingOutput = false
        )
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
          mInitialResidualIsSet(false),
          mPrintingOutput(PrintingOutput),
          mTableIsInitialized(false),
          mDispRatioTolerance(DispRatioTolerance),
          mDispAbsTolerance(DispAbsTolerance)
    {
    }

    /**
     * @brief Default constructor (parameters)
     * @param ThisParameters The configuration parameters
     */
    explicit DisplacementResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
          mInitialResidualIsSet(false),
          mTableIsInitialized(false)
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact"               : false,
            "print_convergence_criterion"  : false,
            "residual_relative_tolerance"  : 1.0e-4,
            "residual_absolute_tolerance"  : 1.0e-9
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);

        // The displacement residual tolerances
        mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();

        // Additional flags -> NOTE: Replace for a real flag?
        mPrintingOutput = ThisParameters["print_convergence_criterion"].GetBool();
    }

    /// Copy constructor.
    DisplacementResidualContactCriteria( DisplacementResidualContactCriteria const& rOther )
        : BaseType(rOther)
        , mInitialResidualIsSet(rOther.mInitialResidualIsSet)
        , mDispRatioTolerance(rOther.mDispRatioTolerance)
        , mDispAbsTolerance(rOther.mDispAbsTolerance)
        , mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
        , mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
        , mPrintingOutput(rOther.mPrintingOutput)
        , mTableIsInitialized(rOther.mTableIsInitialized)
    {
    }

    /// Destructor.
    ~DisplacementResidualContactCriteria() override = default;

    ///@}
    ///@name Operators
    ///@{

    /**
     * @brief Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
            // Initialize
            TDataType disp_residual_solution_norm = 0.0;
            IndexType disp_dof_num(0);

            // Loop over Dofs: accumulate the squared residual of all free DOFs
            #pragma omp parallel for reduction(+:disp_residual_solution_norm,disp_dof_num)
            for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
                auto it_dof = rDofSet.begin() + i;
                if (it_dof->IsFree()) {
                    // Only free DOFs contribute to the residual norm
                    const std::size_t dof_id = it_dof->EquationId();
                    const TDataType residual_dof_value = rb[dof_id];
                    disp_residual_solution_norm += residual_dof_value * residual_dof_value;
                    disp_dof_num++;
                }
            }

            mDispCurrentResidualNorm = disp_residual_solution_norm;

            TDataType residual_disp_ratio = 1.0;

            // We initialize the reference norm on the first call of the step
            if (mInitialResidualIsSet == false) {
                mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
                residual_disp_ratio = 1.0;
                mInitialResidualIsSet = true;
            }

            // We calculate the ratio of the displacements
            residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

            // We calculate the absolute norm (average squared residual per free DOF).
            // Guard against division by zero when every DOF is fixed but rb is non-empty.
            const TDataType residual_disp_abs = (disp_dof_num > 0) ? mDispCurrentResidualNorm/static_cast<TDataType>(disp_dof_num) : 0.0;

            // The process info of the model part
            ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

            // We print the results // TODO: Replace for the new log
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    std::cout.precision(4);
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance;
                } else {
                    std::cout.precision(4);
                    if (mPrintingOutput == false) {
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    } else {
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    }
                }
            }

            // Export the convergence measures so other tools can read them
            r_process_info[CONVERGENCE_RATIO] = residual_disp_ratio;
            r_process_info[RESIDUAL_NORM] = residual_disp_abs;

            // We check if converged (either relative OR absolute criterion satisfied)
            const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);

            if (disp_converged) {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mPrintingOutput == false)
                            r_table << BOLDFONT(FGRN("       Achieved"));
                        else
                            r_table << "Achieved";
                    } else {
                        if (mPrintingOutput == false)
                            KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                    }
                }
                return true;
            } else {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                    if (r_process_info.Has(TABLE_UTILITY)) {
                        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                        auto& r_table = p_table->GetTable();
                        if (mPrintingOutput == false)
                            r_table << BOLDFONT(FRED("   Not achieved"));
                        else
                            r_table << "Not achieved";
                    } else {
                        if (mPrintingOutput == false)
                            KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                        else
                            KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                    }
                }
                return false;
            }
        } else { // In this case all the displacements are imposed!
            return true;
        }
    }

    /**
     * @brief This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
     */
    void Initialize( ModelPart& rModelPart) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;

        // Register the table columns once, if a table utility is available
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        if (r_process_info.Has(TABLE_UTILITY) && mTableIsInitialized == false) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& r_table = p_table->GetTable();
            r_table.AddColumn("DP RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
            r_table.AddColumn("CONVERGENCE", 15);
            mTableIsInitialized = true;
        }
    }

    /**
     * @brief This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the contact problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param rA System matrix (unused)
     * @param rDx Vector of results (variations on nodal variables)
     * @param rb RHS vector (residual)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // The reference residual norm is recomputed on the first PostCriteria of each step
        mInitialResidualIsSet = false;
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    bool mInitialResidualIsSet;           /// This "flag" is set in order to set that the initial residual is already computed
    bool mPrintingOutput;                 /// If the colors and bold are printed
    bool mTableIsInitialized;             /// If the table is already initialized

    TDataType mDispRatioTolerance;        /// The ratio threshold for the norm of the displacement residual
    TDataType mDispAbsTolerance;          /// The absolute value threshold for the norm of the displacement residual

    TDataType mDispInitialResidualNorm;   /// The reference norm of the displacement residual (set on first PostCriteria of a step)
    TDataType mDispCurrentResidualNorm;   /// The current norm of the displacement residual

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@}
    ///@name Serialization
    ///@{

    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Unaccessible methods
    ///@{

    ///@}
};
///@} // Kratos classes
///@} // Application group
}
#endif /* KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H */
|
empty_reduce.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile -DNOWAIT && %libomp-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc
#include "callback.h"
#include <omp.h>
#ifdef NOWAIT
#define FOR_CLAUSE nowait
#else
#define FOR_CLAUSE
#endif
int main() {
  int sum = 0;
  int i;
  // A one-thread parallel region with a reduction: enough to trigger the
  // OMPT reduction begin/end callbacks checked below.  The FOR_CLAUSE macro
  // optionally appends `nowait` (see -DNOWAIT above).
#pragma omp parallel num_threads(1)
#pragma omp for reduction(+ : sum) FOR_CLAUSE
  for (i = 0; i < 10000; i++) {
    sum += i;
  }

  // The lines below are FileCheck directives, not ordinary comments: the
  // runner pipes the OMPT callback trace through FileCheck and matches them
  // in order.  Do not edit them without updating the expected trace.
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_reduction_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]],
  // CHECK-SAME: codeptr_ra=
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_reduction_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]],
  // CHECK-SAME: task_id=[[TASK_ID]], codeptr_ra=
  return 0;
}
|
hist_util.h | /*!
* Copyright 2017-2020 by Contributors
* \file hist_util.h
* \brief Utility for fast histogram aggregation
* \author Philip Cho, Tianqi Chen
*/
#ifndef XGBOOST_COMMON_HIST_UTIL_H_
#define XGBOOST_COMMON_HIST_UTIL_H_
#include <xgboost/data.h>
#include <xgboost/generic_parameters.h>
#include <limits>
#include <vector>
#include <algorithm>
#include <memory>
#include <utility>
#include <map>
#include "row_set.h"
#include "common.h"
#include "threading_utils.h"
#include "../tree/param.h"
#include "./quantile.h"
#include "./timer.h"
#include "../include/rabit/rabit.h"
namespace xgboost {
namespace common {
/*!
* \brief A single row in global histogram index.
* Directly represent the global index in the histogram entry.
*/
using GHistIndexRow = Span<uint32_t const>;
// A CSC matrix representing histogram cuts, used in CPU quantile hist.
// The cut values represent upper bounds of bins containing approximately equal numbers of elements
class HistogramCuts {
 protected:
  using BinIdx = uint32_t;

 public:
  // Flattened cut values for all features; cut_ptrs_ gives the [begin, end)
  // range of each feature's cuts inside cut_values_ (CSC layout).
  HostDeviceVector<bst_float> cut_values_;  // NOLINT
  HostDeviceVector<uint32_t> cut_ptrs_;     // NOLINT
  // storing minimum value in a sketch set.
  HostDeviceVector<float> min_vals_;  // NOLINT
  HistogramCuts();
  // Deep copy.  Resize-then-Copy is done explicitly per vector;
  // NOTE(review): presumably this ordering matters for HostDeviceVector's
  // host/device synchronization — confirm before restructuring.
  HistogramCuts(HistogramCuts const& that) {
    cut_values_.Resize(that.cut_values_.Size());
    cut_ptrs_.Resize(that.cut_ptrs_.Size());
    min_vals_.Resize(that.min_vals_.Size());
    cut_values_.Copy(that.cut_values_);
    cut_ptrs_.Copy(that.cut_ptrs_);
    min_vals_.Copy(that.min_vals_);
  }
  HistogramCuts(HistogramCuts&& that) noexcept(true) {
    *this = std::forward<HistogramCuts&&>(that);
  }
  // Deep copy assignment (same per-vector Resize/Copy as the copy ctor).
  HistogramCuts& operator=(HistogramCuts const& that) {
    cut_values_.Resize(that.cut_values_.Size());
    cut_ptrs_.Resize(that.cut_ptrs_.Size());
    min_vals_.Resize(that.min_vals_.Size());
    cut_values_.Copy(that.cut_values_);
    cut_ptrs_.Copy(that.cut_ptrs_);
    min_vals_.Copy(that.min_vals_);
    return *this;
  }
  HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) {
    cut_ptrs_ = std::move(that.cut_ptrs_);
    cut_values_ = std::move(that.cut_values_);
    min_vals_ = std::move(that.min_vals_);
    return *this;
  }
  // Number of cut points belonging to `feature`.
  // .at() on feature+1 throws on an out-of-range feature id.
  uint32_t FeatureBins(uint32_t feature) const {
    return cut_ptrs_.ConstHostVector().at(feature + 1) -
           cut_ptrs_.ConstHostVector()[feature];
  }
  // Getters.  Cuts should be of no use after building histogram indices, but currently
  // it's deeply linked with quantile_hist, gpu sketcher and gpu_hist.  So we preserve
  // these for now.
  std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_.ConstHostVector(); }
  std::vector<float> const& Values() const { return cut_values_.ConstHostVector(); }
  std::vector<float> const& MinValues() const { return min_vals_.ConstHostVector(); }
  // Total number of bins across all features (last entry of the CSC pointer array).
  size_t TotalBins() const { return cut_ptrs_.ConstHostVector().back(); }
  // Return the index of a cut point that is strictly greater than the input
  // value, or the last available index of the feature if none exists.
  BinIdx SearchBin(float value, uint32_t column_id) const {
    auto beg = cut_ptrs_.ConstHostVector().at(column_id);
    auto end = cut_ptrs_.ConstHostVector().at(column_id + 1);
    const auto &values = cut_values_.ConstHostVector();
    // upper_bound requires the per-feature cut values to be sorted ascending.
    auto it = std::upper_bound(values.cbegin() + beg, values.cbegin() + end, value);
    BinIdx idx = it - values.cbegin();
    if (idx == end) {
      // value exceeds every cut: clamp to the feature's last bin
      idx -= 1;
    }
    return idx;
  }
  // Convenience overload: bin lookup for a sparse-matrix entry.
  BinIdx SearchBin(Entry const& e) const {
    return SearchBin(e.fvalue, e.index);
  }
};
// Build quantile cuts for every feature of `m` with at most `max_bins` bins
// per feature.  Streams over the matrix twice: once to count entries per
// column, once to feed rows into the sketch container.
inline HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins) {
  HistogramCuts out;
  auto const& info = m->Info();
  const auto threads = omp_get_max_threads();
  // Total number of entries per column, accumulated over all pages.
  std::vector<bst_row_t> reduced(info.num_col_, 0);
  // (A per-thread `column_sizes` buffer used to be allocated here but was
  // never read — CalcColumnSize manages its own thread-local storage.)
  for (auto const& page : m->GetBatches<SparsePage>()) {
    auto const &entries_per_column =
        HostSketchContainer::CalcColumnSize(page, info.num_col_, threads);
    for (size_t i = 0; i < entries_per_column.size(); ++i) {
      reduced[i] += entries_per_column[i];
    }
  }
  HostSketchContainer container(reduced, max_bins,
                                HostSketchContainer::UseGroup(info));
  for (auto const &page : m->GetBatches<SparsePage>()) {
    container.PushRowPage(page, info);
  }
  container.MakeCuts(&out);
  return out;
}
// Width in bytes of one packed bin index inside `Index` below; the value is
// used directly as a divisor/stride when addressing the raw byte buffer.
enum BinTypeSize {
  kUint8BinsTypeSize  = 1,  // bin ids fit in uint8_t
  kUint16BinsTypeSize = 2,  // bin ids fit in uint16_t
  kUint32BinsTypeSize = 4   // bin ids need a full uint32_t
};
// Type-erased container of bin indices packed as 1-, 2- or 4-byte integers.
// Values are always decoded to uint32_t; for dense data a per-feature offset
// table (set via ResizeOffset) is added back on read.
struct Index {
  Index() {
    SetBinTypeSize(binTypeSize_);
  }
  Index(const Index& i) = delete;
  Index& operator=(Index i) = delete;
  Index(Index&& i) = delete;
  Index& operator=(Index&& i) = delete;

  // Decode the i-th entry; with an offset table present, add the offset of
  // the feature this position belongs to (positions cycle through features).
  uint32_t operator[](size_t i) const {
    const uint32_t raw = func_(data_ptr_, i);
    return (offset_ptr_ == nullptr) ? raw : raw + offset_ptr_[i % p_];
  }

  // Select the decoder matching the chosen storage width.
  void SetBinTypeSize(BinTypeSize binTypeSize) {
    binTypeSize_ = binTypeSize;
    if (binTypeSize == kUint8BinsTypeSize) {
      func_ = &GetValueFromUint8;
    } else if (binTypeSize == kUint16BinsTypeSize) {
      func_ = &GetValueFromUint16;
    } else if (binTypeSize == kUint32BinsTypeSize) {
      func_ = &GetValueFromUint32;
    } else {
      // Unknown width: fail loudly, leaving the previous decoder in place.
      CHECK(binTypeSize == kUint8BinsTypeSize ||
            binTypeSize == kUint16BinsTypeSize ||
            binTypeSize == kUint32BinsTypeSize);
    }
  }

  BinTypeSize GetBinTypeSize() const {
    return binTypeSize_;
  }

  // Raw view of the byte buffer as the caller's element type.
  template<typename T>
  T* data() const {  // NOLINT
    return static_cast<T*>(data_ptr_);
  }

  uint32_t* Offset() const {
    return offset_ptr_;
  }

  size_t OffsetSize() const {
    return offset_.size();
  }

  // Number of stored entries (bytes divided by entry width).
  size_t Size() const {
    return data_.size() / binTypeSize_;
  }

  // Grow/shrink the raw byte buffer; the cached pointer must track the
  // vector's storage, which may move on reallocation.
  void Resize(const size_t nBytesData) {
    data_.resize(nBytesData);
    data_ptr_ = data_.data();
  }

  // Install a per-feature offset table of `nDisps` entries.
  void ResizeOffset(const size_t nDisps) {
    offset_.resize(nDisps);
    offset_ptr_ = offset_.data();
    p_ = nDisps;
  }

  std::vector<uint8_t>::const_iterator begin() const {  // NOLINT
    return data_.begin();
  }

  std::vector<uint8_t>::const_iterator end() const {  // NOLINT
    return data_.end();
  }

 private:
  // The three decoders reinterpret the type-erased buffer at the stored width
  // and widen the value to uint32_t.
  static uint32_t GetValueFromUint8(void* t, size_t i) {
    return static_cast<uint8_t*>(t)[i];
  }
  static uint32_t GetValueFromUint16(void* t, size_t i) {
    return static_cast<uint16_t*>(t)[i];
  }
  static uint32_t GetValueFromUint32(void* t, size_t i) {
    return static_cast<uint32_t*>(t)[i];
  }

  using Func = uint32_t (*)(void*, size_t);

  std::vector<uint8_t> data_;      // packed bin indices, raw bytes
  std::vector<uint32_t> offset_;   // size of this field is equal to number of features
  void* data_ptr_;                 // cached data_.data(), refreshed in Resize()
  BinTypeSize binTypeSize_ {kUint8BinsTypeSize};
  size_t p_ {1};                   // modulus mapping position -> feature for offset_
  uint32_t* offset_ptr_ {nullptr}; // null means "no offsets" (sparse layout)
  Func func_;                      // active decoder, set by SetBinTypeSize()
};
/*!
* \brief preprocessed global index matrix, in CSR format
*
* Transform floating values to integer index in histogram This is a global histogram
* index for CPU histogram. On GPU ellpack page is used.
*/
struct GHistIndexMatrix {
  /*! \brief row pointer to rows by element position */
  std::vector<size_t> row_ptr;
  /*! \brief The index data */
  Index index;
  /*! \brief hit count of each index */
  std::vector<size_t> hit_count;
  /*! \brief The corresponding cuts */
  HistogramCuts cut;
  DMatrix* p_fmat;
  size_t max_num_bins;
  // Create a global histogram matrix, given cut
  void Init(DMatrix* p_fmat, int max_num_bins);
  // specific method for sparse data as no posibility to reduce allocated memory
  // Fills index_data_span with the bin index of every entry of `batch`,
  // starting at row `rbegin`, and bumps per-thread hit counters.
  // `get_offset(idx, j)` lets dense layouts store idx minus the feature base
  // (so it fits a narrower integer); sparse layouts store idx unchanged.
  // NOTE(review): assumes row_ptr has already been sized by Init for
  // rows [rbegin, rbegin + batch.Size()] and that hit_count_tloc_ holds
  // nthreads * nbins zero-initialized slots — confirm against Init's code.
  template <typename BinIdxType, typename GetOffset>
  void SetIndexData(common::Span<BinIdxType> index_data_span,
                    size_t batch_threads, const SparsePage &batch,
                    size_t rbegin, size_t nbins, GetOffset get_offset) {
    const xgboost::Entry *data_ptr = batch.data.HostVector().data();
    const std::vector<bst_row_t> &offset_vec = batch.offset.HostVector();
    const size_t batch_size = batch.Size();
    CHECK_LT(batch_size, offset_vec.size());
    BinIdxType* index_data = index_data_span.data();
    // Rows are independent; each thread accumulates hits into its own
    // hit_count_tloc_ slice (indexed by tid), so no atomics are needed.
#pragma omp parallel for num_threads(batch_threads) schedule(static)
    for (omp_ulong i = 0; i < batch_size; ++i) {
      const int tid = omp_get_thread_num();
      size_t ibegin = row_ptr[rbegin + i];
      size_t iend = row_ptr[rbegin + i + 1];
      const size_t size = offset_vec[i + 1] - offset_vec[i];
      SparsePage::Inst inst = {data_ptr + offset_vec[i], size};
      // Row slot in the output must exactly fit this row's entries.
      CHECK_EQ(ibegin + inst.size(), iend);
      for (bst_uint j = 0; j < inst.size(); ++j) {
        uint32_t idx = cut.SearchBin(inst[j]);
        index_data[ibegin + j] = get_offset(idx, j);
        ++hit_count_tloc_[tid * nbins + idx];
      }
    }
  }
  void ResizeIndex(const size_t n_index,
                   const bool isDense);
  // Accumulate, per feature, the hits of all bins belonging to that feature.
  // `counts` must point to at least (num_features) slots, zero-initialized
  // by the caller (values are added, not assigned).
  inline void GetFeatureCounts(size_t* counts) const {
    auto nfeature = cut.Ptrs().size() - 1;
    for (unsigned fid = 0; fid < nfeature; ++fid) {
      auto ibegin = cut.Ptrs()[fid];
      auto iend = cut.Ptrs()[fid + 1];
      for (auto i = ibegin; i < iend; ++i) {
        counts[fid] += hit_count[i];
      }
    }
  }
  inline bool IsDense() const {
    return isDense_;
  }

 private:
  // Per-thread hit counters, laid out as [tid * nbins + bin]; reduced into
  // hit_count elsewhere (presumably in Init — defined outside this header).
  std::vector<size_t> hit_count_tloc_;
  bool isDense_;
};
// Binary-search the (sorted) bin indices of one row slice [begin, end) for a
// bin belonging to feature f, i.e. a gidx with fidx_begin <= gidx < fidx_end.
// Returns the bin index itself (NOT its position), or -1 if the feature has
// no entry in this row (missing value).
template <typename GradientIndex>
int32_t XGBOOST_HOST_DEV_INLINE BinarySearchBin(bst_uint begin, bst_uint end,
                                                GradientIndex const &data,
                                                uint32_t const fidx_begin,
                                                uint32_t const fidx_end) {
  uint32_t previous_middle = std::numeric_limits<uint32_t>::max();
  while (end != begin) {
    auto middle = begin + (end - begin) / 2;
    // Termination guard: `begin = middle` below (deliberately NOT middle + 1)
    // can leave the interval unchanged when end - begin == 1; revisiting the
    // same midpoint twice means the value is absent, so bail out.
    if (middle == previous_middle) {
      break;
    }
    previous_middle = middle;
    auto gidx = data[middle];
    if (gidx >= fidx_begin && gidx < fidx_end) {
      return static_cast<int32_t>(gidx);
    } else if (gidx < fidx_begin) {
      begin = middle;
    } else {
      end = middle;
    }
  }
  // Value is missing
  return -1;
}
// Non-owning view over one block of the global histogram index: a CSR slice
// given by a row-pointer array and the flat bin-index array.
struct GHistIndexBlock {
  const size_t* row_ptr;
  const uint32_t* index;

  inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index)
      : row_ptr(row_ptr), index(index) {}

  // View of the i-th row: entries index[row_ptr[i] .. row_ptr[i+1]).
  inline GHistIndexRow operator[](size_t i) const {
    const size_t row_begin = row_ptr[i];
    const size_t row_length = row_ptr[i + 1] - row_begin;
    return {index + row_begin, row_length};
  }
};
class ColumnMatrix;
// Histogram index regrouped into feature blocks (see Init, defined in the
// .cc file); each Block is a pair of pointer ranges into row_ptr_ / index_.
class GHistIndexBlockMatrix {
 public:
  // Build the blocked layout from a histogram index matrix, its column
  // representation, and training parameters (implementation elsewhere).
  void Init(const GHistIndexMatrix& gmat,
            const ColumnMatrix& colmat,
            const tree::TrainParam& param);

  // Non-owning view of the i-th block.
  inline GHistIndexBlock operator[](size_t i) const {
    return {blocks_[i].row_ptr_begin, blocks_[i].index_begin};
  }

  inline size_t GetNumBlock() const {
    return blocks_.size();
  }

 private:
  std::vector<size_t> row_ptr_;     // backing storage for all blocks' row pointers
  std::vector<uint32_t> index_;     // backing storage for all blocks' bin indices
  const HistogramCuts* cut_;        // non-owning; set by Init
  struct Block {
    // Half-open pointer ranges into row_ptr_ / index_ above.
    const size_t* row_ptr_begin;
    const size_t* row_ptr_end;
    const uint32_t* index_begin;
    const uint32_t* index_end;
  };
  std::vector<Block> blocks_;
};
// One histogram row: a non-owning span of (grad, hess) pairs, one per bin.
template<typename GradientSumT>
using GHistRow = Span<xgboost::detail::GradientPairInternal<GradientSumT> >;

/*!
 * \brief fill a histogram by zeros in range [begin, end)
 */
template<typename GradientSumT>
void InitilizeHistByZeroes(GHistRow<GradientSumT> hist, size_t begin, size_t end);

/*!
 * \brief Increment hist as dst += add in range [begin, end)
 */
template<typename GradientSumT>
void IncrementHist(GHistRow<GradientSumT> dst, const GHistRow<GradientSumT> add,
                   size_t begin, size_t end);

/*!
 * \brief Copy hist from src to dst in range [begin, end)
 */
template<typename GradientSumT>
void CopyHist(GHistRow<GradientSumT> dst, const GHistRow<GradientSumT> src,
              size_t begin, size_t end);

/*!
 * \brief Compute Subtraction: dst = src1 - src2 in range [begin, end)
 */
template<typename GradientSumT>
void SubtractionHist(GHistRow<GradientSumT> dst, const GHistRow<GradientSumT> src1,
                     const GHistRow<GradientSumT> src2,
                     size_t begin, size_t end);
/*!
* \brief histogram of gradient statistics for multiple nodes
*/
/*!
 * \brief histogram of gradient statistics for multiple nodes
 *
 * Rows (one per tree node) live contiguously in data_; row_ptr_[nid] is the
 * offset of node nid's histogram, or the uint32_t-max sentinel when the node
 * has no histogram yet.
 */
template<typename GradientSumT>
class HistCollection {
 public:
  using GHistRowT = GHistRow<GradientSumT>;
  using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>;

  // access histogram for i-th node (must exist — checked against the sentinel)
  GHistRowT operator[](bst_uint nid) const {
    constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
    CHECK_NE(row_ptr_[nid], kMax);
    GradientPairT* ptr =
        const_cast<GradientPairT*>(dmlc::BeginPtr(data_) + row_ptr_[nid]);
    return {ptr, nbins_};
  }

  // have we computed a histogram for i-th node?
  // (Was `const uint32_t k_max`; now constexpr kMax for consistency with the
  // other methods of this class.)
  bool RowExists(bst_uint nid) const {
    constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
    return (nid < row_ptr_.size() && row_ptr_[nid] != kMax);
  }

  // initialize histogram collection for `nbins` bins per node
  void Init(uint32_t nbins) {
    if (nbins_ != nbins) {
      nbins_ = nbins;
      // quite expensive operation, so let's do this only once
      data_.clear();
    }
    row_ptr_.clear();
    n_nodes_added_ = 0;
  }

  // create an empty histogram for i-th node
  // NOTE(review): row_ptr_ stores size_t offsets but the sentinel is
  // uint32_t-max, so a valid offset equal to 2^32-1 would be misread as
  // "missing" — only an issue for >4G total bins; confirm if that can occur.
  void AddHistRow(bst_uint nid) {
    constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
    if (nid >= row_ptr_.size()) {
      row_ptr_.resize(nid + 1, kMax);
    }
    CHECK_EQ(row_ptr_[nid], kMax);  // each node gets a row at most once

    if (data_.size() < nbins_ * (nid + 1)) {
      data_.resize(nbins_ * (nid + 1));
    }

    // Rows are handed out in insertion order, packed back-to-back.
    row_ptr_[nid] = nbins_ * n_nodes_added_;
    n_nodes_added_++;
  }

 private:
  /*! \brief number of all bins over all features */
  uint32_t nbins_ = 0;
  /*! \brief amount of active nodes in hist collection */
  uint32_t n_nodes_added_ = 0;

  std::vector<GradientPairT> data_;

  /*! \brief row_ptr_[nid] locates bin for histogram of node nid */
  std::vector<size_t> row_ptr_;
};
/*!
* \brief Stores temporary histograms to compute them in parallel
* Supports processing multiple tree-nodes for nested parallelism
* Able to reduce histograms across threads in efficient way
*/
/*!
 * \brief Stores temporary histograms to compute them in parallel
 * Supports processing multiple tree-nodes for nested parallelism
 * Able to reduce histograms across threads in efficient way
 *
 * Lifecycle per batch: Reset() -> threads call GetInitializedHist() and fill
 * their partial histograms -> ReduceHist() folds the partials into the
 * caller-provided target histograms.
 */
template<typename GradientSumT>
class ParallelGHistBuilder {
 public:
  using GHistRowT = GHistRow<GradientSumT>;

  // Set the number of bins; the internal buffer is reinitialized only when
  // the bin count actually changes.
  void Init(size_t nbins) {
    if (nbins != nbins_) {
      hist_buffer_.Init(nbins);
      nbins_ = nbins;
    }
  }

  // Add new elements if needed, mark all hists as unused
  // targeted_hists - already allocated hists which should contain final results after Reduce() call
  void Reset(size_t nthreads, size_t nodes, const BlockedSpace2d& space,
             const std::vector<GHistRowT>& targeted_hists) {
    hist_buffer_.Init(nbins_);
    tid_nid_to_hist_.clear();
    hist_memory_.clear();
    threads_to_nids_map_.clear();

    targeted_hists_ = targeted_hists;

    CHECK_EQ(nodes, targeted_hists.size());

    nodes_ = nodes;
    nthreads_ = nthreads;

    // Plan which (thread, node) pairs will occur, allocate one extra
    // histogram per additional thread on a node, then map each pair to its
    // backing storage.
    MatchThreadsToNodes(space);
    AllocateAdditionalHistograms();
    MatchNodeNidPairToHist();

    hist_was_used_.resize(nthreads * nodes_);
    std::fill(hist_was_used_.begin(), hist_was_used_.end(), static_cast<int>(false));
  }

  // Get specified hist, initialize hist by zeros if it wasn't used before
  GHistRowT GetInitializedHist(size_t tid, size_t nid) {
    CHECK_LT(nid, nodes_);
    CHECK_LT(tid, nthreads_);

    size_t idx = tid_nid_to_hist_.at({tid, nid});
    GHistRowT hist = hist_memory_[idx];

    // Lazy zeroing: a histogram is cleared the first time its (tid, nid)
    // slot is requested after Reset().
    if (!hist_was_used_[tid * nodes_ + nid]) {
      InitilizeHistByZeroes(hist, 0, hist.size());
      hist_was_used_[tid * nodes_ + nid] = static_cast<int>(true);
    }

    return hist;
  }

  // Reduce following bins (begin, end] for nid-node in dst across threads
  void ReduceHist(size_t nid, size_t begin, size_t end) {
    CHECK_GT(end, begin);
    CHECK_LT(nid, nodes_);

    GHistRowT dst = targeted_hists_[nid];

    bool is_updated = false;
    for (size_t tid = 0; tid < nthreads_; ++tid) {
      if (hist_was_used_[tid * nodes_ + nid]) {
        is_updated = true;
        const size_t idx = tid_nid_to_hist_.at({tid, nid});
        GHistRowT src = hist_memory_[idx];

        // One thread's partial IS dst itself (the first thread on a node is
        // mapped straight to the target hist) — skip self-addition.
        if (dst.data() != src.data()) {
          IncrementHist(dst, src, begin, end);
        }
      }
    }
    if (!is_updated) {
      // In distributed mode - some tree nodes can be empty on local machines,
      // So we need just set local hist by zeros in this case
      InitilizeHistByZeroes(dst, begin, end);
    }
  }

 protected:
  // Mark, for each thread, which contiguous range of nodes its static chunk
  // of the 2D space touches.
  void MatchThreadsToNodes(const BlockedSpace2d& space) {
    const size_t space_size = space.Size();
    const size_t chunck_size = space_size / nthreads_ + !!(space_size % nthreads_);

    threads_to_nids_map_.resize(nthreads_ * nodes_, false);

    for (size_t tid = 0; tid < nthreads_; ++tid) {
      size_t begin = chunck_size * tid;
      size_t end   = std::min(begin + chunck_size, space_size);

      if (begin < space_size) {
        // Chunks are contiguous, so the nodes a thread touches form the
        // inclusive range [first-dim(begin), first-dim(end - 1)].
        size_t nid_begin = space.GetFirstDimension(begin);
        size_t nid_end   = space.GetFirstDimension(end-1);

        for (size_t nid = nid_begin; nid <= nid_end; ++nid) {
          // true - means thread 'tid' will work to compute partial hist for node 'nid'
          threads_to_nids_map_[tid * nodes_ + nid] = true;
        }
      }
    }
  }

  // Grow the internal buffer by one histogram per "extra" thread on a node
  // (the first thread writes directly into the target histogram).
  void AllocateAdditionalHistograms() {
    size_t hist_allocated_additionally = 0;

    for (size_t nid = 0; nid < nodes_; ++nid) {
      int nthreads_for_nid = 0;

      for (size_t tid = 0; tid < nthreads_; ++tid) {
        if (threads_to_nids_map_[tid * nodes_ + nid]) {
          nthreads_for_nid++;
        }
      }

      // In distributed mode - some tree nodes can be empty on local machines,
      // set nthreads_for_nid to 0 in this case.
      // In another case - allocate additional (nthreads_for_nid - 1) histograms,
      // because one is already allocated externally (will store final result for the node).
      hist_allocated_additionally += std::max<int>(0, nthreads_for_nid - 1);
    }

    for (size_t i = 0; i < hist_allocated_additionally; ++i) {
      hist_buffer_.AddHistRow(i);
    }
  }

  // Fill hist_memory_ and the {tid, nid} -> index map established above.
  void MatchNodeNidPairToHist() {
    size_t hist_total = 0;
    size_t hist_allocated_additionally = 0;

    for (size_t nid = 0; nid < nodes_; ++nid) {
      bool first_hist = true;
      for (size_t tid = 0; tid < nthreads_; ++tid) {
        if (threads_to_nids_map_[tid * nodes_ + nid]) {
          if (first_hist) {
            // First thread on the node reduces straight into the target.
            hist_memory_.push_back(targeted_hists_[nid]);
            first_hist = false;
          } else {
            hist_memory_.push_back(hist_buffer_[hist_allocated_additionally]);
            hist_allocated_additionally++;
          }
          // map pair {tid, nid} to index of allocated histogram from hist_memory_
          tid_nid_to_hist_[{tid, nid}] = hist_total++;
          CHECK_EQ(hist_total, hist_memory_.size());
        }
      }
    }
  }

  /*! \brief number of bins in each histogram */
  size_t nbins_ = 0;
  /*! \brief number of threads for parallel computation */
  size_t nthreads_ = 0;
  /*! \brief number of nodes which will be processed in parallel  */
  size_t nodes_ = 0;
  /*! \brief Buffer for additional histograms for Parallel processing  */
  HistCollection<GradientSumT> hist_buffer_;
  /*!
   * \brief Marks which hists were used, it means that they should be merged.
   * Contains only {true or false} values
   * but 'int' is used instead of 'bool', because std::vector<bool> isn't thread safe
   */
  std::vector<int> hist_was_used_;

  /*! \brief Buffer for additional histograms for Parallel processing  */
  std::vector<bool> threads_to_nids_map_;
  /*! \brief Contains histograms for final results  */
  std::vector<GHistRowT> targeted_hists_;
  /*! \brief Allocated memory for histograms used for construction  */
  std::vector<GHistRowT> hist_memory_;
  /*! \brief map pair {tid, nid} to index of allocated histogram from hist_memory_  */
  std::map<std::pair<size_t, size_t>, size_t> tid_nid_to_hist_;
};
/*!
* \brief builder for histograms of gradient statistics
*/
/*!
 * \brief builder for histograms of gradient statistics
 * (method bodies are defined in the corresponding .cc file)
 */
template<typename GradientSumT>
class GHistBuilder {
 public:
  using GHistRowT = GHistRow<GradientSumT>;

  GHistBuilder() = default;
  GHistBuilder(size_t nthread, uint32_t nbins) : nthread_{nthread}, nbins_{nbins} {}

  // construct a histogram via histogram aggregation over the given rows
  void BuildHist(const std::vector<GradientPair>& gpair,
                 const RowSetCollection::Elem row_indices,
                 const GHistIndexMatrix& gmat,
                 GHistRowT hist,
                 bool isDense);
  // same, with feature grouping (blocked index layout)
  void BuildBlockHist(const std::vector<GradientPair>& gpair,
                      const RowSetCollection::Elem row_indices,
                      const GHistIndexBlockMatrix& gmatb,
                      GHistRowT hist);
  // construct a histogram via subtraction trick: self = parent - sibling
  void SubtractionTrick(GHistRowT self,
                        GHistRowT sibling,
                        GHistRowT parent);

  uint32_t GetNumBins() const {
    return nbins_;
  }

 private:
  /*! \brief number of threads for parallel computation */
  size_t nthread_ { 0 };
  /*! \brief number of all bins over all features */
  uint32_t nbins_ { 0 };
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_HIST_UTIL_H_
|
mpi-omp-pie-calculation100.c | /****************************************************************************
C-DAC Tech Workshop : HeGaPa-2012
July 16-20,2012
Example 2 : Mpi-Omp_PI_Calculation.c
Objective : Write an MPI-OpenMP Program to compute numerical
integration of PI value
Input : The number of intervals.
Output : The calculated value of PI with Abs value.
Created : MAY-2012
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "mpi.h"
#include <omp.h>
#define INTERVALS 1000 //Originally 1000
/* Integrand for the PI computation: f(x) = 4 / (1 + x^2).
 * Integrating f over [0, 1] yields PI exactly. */
double
func(double x)
{
    const double denominator = 1.0 + x * x;
    return 4.0 / denominator;
}
/* Main Program */
/*
 * Hybrid MPI + OpenMP estimation of PI by midpoint-rule integration of
 * f(x) = 4/(1+x^2) over [0,1].  Root broadcasts the interval count, each
 * rank integrates its contiguous slice (interval loop parallelised with an
 * OpenMP reduction), and MPI_Reduce sums the per-rank areas on Root.
 *
 * Fixes over the original: removed the unused locals (Noofthreads,
 * threadid, iproc, valid, Source/Destination tags, MPI_Status, totalsum,
 * finalsum) and added an explicit return value for main.
 */
int
main(int argc, char *argv[])
{
    int NoIntervals, interval, ScatterSize;
    int MyRank, Numprocs, start, end;
    int Root = 0;
    double PI25DT = 3.141592653589793238462643;  /* reference value of PI */
    double mypi, pi, h, x, partialsum;

    /* ....MPI Initialisation.... */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &Numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);

    /* Root chooses the interval count (interactive read disabled). */
    if (MyRank == Root) {
        printf("Enter The NoIntervals Of Intervals: ");
        /* scanf("%d", &NoIntervals); */
        NoIntervals = INTERVALS;
    }
    MPI_Bcast(&NoIntervals, 1, MPI_INT, Root, MPI_COMM_WORLD);

    if (NoIntervals <= 0) {
        if (MyRank == Root)
            printf("Invalid Value For Number Of Intervals .....\n");
        MPI_Finalize();
        exit(-1);
    }

    /* Checking For Equal Division Of Intervals Across The Processors */
    if (NoIntervals % Numprocs != 0) {
        if (MyRank == Root)
            printf("NoofIntervals Cannot Be Evenly Distributed \n");
        MPI_Finalize();
        exit(0);
    }

    if (MyRank == Root)
        ScatterSize = NoIntervals / Numprocs;
    MPI_Bcast(&ScatterSize, 1, MPI_INT, Root, MPI_COMM_WORLD);

    h = 1.0 / (double) NoIntervals;

    /* Each rank owns intervals [start, end]; OpenMP splits them across
       4 threads and the reduction clause combines the partial sums. */
    partialsum = 0.0;
    start = MyRank * ScatterSize + 1;
    end = MyRank * ScatterSize + ScatterSize;
    omp_set_num_threads(4);
#pragma omp parallel for private(x,interval) reduction(+ : partialsum)
    for (interval = start; interval <= end; interval = interval + 1) {
        x = h * ((double) interval - 0.5);  /* midpoint of the interval */
        partialsum = partialsum + func(x);
    }
    mypi = partialsum * h;

    /* ....Collect The Areas Calculated In P0,P1.... */
    MPI_Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, Root, MPI_COMM_WORLD);
    if (MyRank == Root) {
        printf("\nPi Is Approximately %.16f, Error Is %.16f \n",
               pi, fabs(pi - PI25DT));
    }

    /* MPI_Termination */
    MPI_Finalize();
    return 0;
}
|
conv_direct_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "convolution_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Copy an in_h x in_w int8 image into an out_h x out_w buffer, placing the
 * image at offset (top, left) and filling every border cell with v.
 * For rows wider than a small threshold the center copy uses memcpy. */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left, int8_t v)
{
    const int8_t* src = input;
    int8_t* dst = output;
    int row = 0;

    /* rows above the image: pure padding */
    while (row < top)
    {
        for (int col = 0; col < out_w; col++)
            dst[col] = v;
        dst += out_w;
        row++;
    }

    /* rows holding the image: left pad, image data, right pad */
    while (row < top + in_h)
    {
        int col = 0;
        while (col < left)
        {
            dst[col] = v;
            col++;
        }
        if (in_w < 12)
        {
            /* narrow rows: element-wise copy avoids memcpy overhead */
            while (col < left + in_w)
            {
                dst[col] = src[col - left];
                col++;
            }
        }
        else
        {
            memcpy(dst + left, src, in_w * sizeof(int8_t));
            col += in_w;
        }
        while (col < out_w)
        {
            dst[col] = v;
            col++;
        }
        src += in_w;
        dst += out_w;
        row++;
    }

    /* rows below the image: pure padding */
    while (row < out_h)
    {
        for (int col = 0; col < out_w; col++)
            dst[col] = v;
        dst += out_w;
        row++;
    }
}
/*
 * Direct 3x3 stride-1 int8 convolution (NCHW layout).
 * Pipeline: optional zero padding -> int32 accumulation over all input
 * channels -> bias add + dequant to fp32 -> optional relu/relu6 ->
 * requant to int8.  num_thread bounds the OpenMP team size.
 * NOTE(review): sys_malloc results are not NULL-checked — TODO confirm
 * sys_malloc aborts on failure.
 */
static int conv3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
/* int32 accumulator and fp32 staging buffers for the whole output */
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = output_tensor->data;
int8_t* input_int8 = input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = bias_tensor->data;
/* get scale value of quantizaiton (per-output-channel kernel scales) */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = weight_tensor->data;
/* pading: symmetric pad on both sides; skip the copy when pad is zero */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
/* accumulate: one output channel per task, summing 3x3 taps over all
   input channels; r0/r1/r2 walk three consecutive input rows */
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
/* stride 1: skip the 2 columns consumed by the 3-wide window */
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
/* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu (activation == 0) */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 (activation > 0 clamps to [0, 6]) */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8, saturating to [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
/* input_tmp was only allocated when padding happened */
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
/*
 * Direct 3x3 stride-2 int8 convolution (NCHW layout).
 * Same pipeline as conv3x3s1_int8_sse; the only differences are the
 * stride-2 column advance inside the inner loop and the tailstep row
 * advance that skips the row consumed by the vertical stride.
 * NOTE(review): sys_malloc results are not NULL-checked — TODO confirm
 * sys_malloc aborts on failure.
 */
static int conv3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
/* int32 accumulator and fp32 staging buffers for the whole output */
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = output_tensor->data;
int8_t* input_int8 = input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = bias_tensor->data;
/* get scale value of quantizaiton (per-output-channel kernel scales) */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = weight_tensor->data;
/* pading: symmetric pad on both sides; skip the copy when pad is zero */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
/* leftover columns of the current row plus one full row: advances the
   row pointers by two input rows (vertical stride 2) */
int tailstep = inw_tmp - 2 * outw + inw_tmp;
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
/* horizontal stride 2 */
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
/* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu (activation == 0) */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 (activation > 0 clamps to [0, 6]) */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8, saturating to [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
/* input_tmp was only allocated when padding happened */
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
/* Execute the node: refresh tensor views and dispatch the int8 direct 3x3
 * convolution by stride.  Returns 0 on success, -1 on unsupported stride. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    int thread_count = exec_graph->num_thread;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    struct tensor* input = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* weight = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* bias = (ir_node->input_num > 2) ? get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]) : NULL;
    struct tensor* output = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct conv_param* param = (struct conv_param*)ir_node->op.param_mem;

    if (param->stride_h == 1)
        return conv3x3s1_int8_sse(input, weight, bias, output, param, thread_count);
    if (param->stride_h == 2)
        return conv3x3s2_int8_sse(input, weight, bias, output, param, thread_count);

    TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h);
    return -1;
}
/* Per-node initialisation hook: this kernel keeps no per-node state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Per-node teardown hook: nothing was allocated in init_node, so no-op. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
/* Report how well this kernel matches the node.  Returns a boosted score
 * when the node is an int8, ungrouped, undilated 3x3 convolution with
 * symmetric padding and uniform stride 1 or 2; otherwise 0 (not usable). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = (struct conv_param*)exec_node->op.param_mem;
    struct graph* ir_graph = exec_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, exec_node->input_tensors[0]);

    /* only support int8 */
    if (input_tensor->data_type != TENGINE_DT_INT8)
        return 0;

    if (param->group != 1)
        return 0;
    if (param->pad_h0 != param->pad_h1 || param->pad_w0 != param->pad_w1)
        return 0;
    if (param->dilation_h != 1 || param->dilation_w != 1)
        return 0;
    if (param->kernel_h != 3 || param->kernel_w != 3)
        return 0;

    int stride_supported = (param->stride_h == 1 && param->stride_w == 1)
                           || (param->stride_h == 2 && param->stride_w == 2);
    if (!stride_supported)
        return 0;

    return OPS_SCORE_BEST * 2;
}
/* Node-ops vtable for the int8 direct 3x3 convolution kernel; unused
   hooks (prerun/reshape/postrun) are left NULL. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register this kernel as a candidate implementation for OP_CONV. */
int register_conv_direct_hcl_x86_op()
{
return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}
/* Remove this kernel from the OP_CONV candidate list; always returns 0. */
int unregister_conv_direct_hcl_x86_op()
{
unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
return 0;
}
|
logit_loss.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef ZDIFACTO_LOSS_LOGIT_LOSS_H_
#define ZDIFACTO_LOSS_LOGIT_LOSS_H_
#include <vector>
#include <cmath>
#include "zdifacto/base.h"
#include "zdifacto/loss.h"
#include "dmlc/data.h"
#include "dmlc/omp.h"
#include "common/spmv.h"
namespace zdifacto {
/**
* \brief the logistic loss
*
* :math:`\ell(x,y,w) = log(1 + exp(- y <w, x>))`
*
*/
class LogitLoss : public Loss {
 public:
  LogitLoss() {}
  virtual ~LogitLoss() {}

  /** \brief no options to parse: everything is returned unconsumed */
  KWArgs Init(const KWArgs& kwargs) override { return kwargs; }

  /**
   * \brief perform prediction: pred += X * w
   *
   * @param data the data X
   * @param param param[0]: real_t weight vector;
   *              param[1] (optional): int vector of weight positions
   * @param pred predict output, should be pre-allocated
   */
  void Predict(const dmlc::RowBlock<unsigned>& data,
               const std::vector<SArray<char>>& param,
               SArray<real_t>* pred) override {
    int num_params = param.size();
    CHECK_GE(num_params, 1);
    CHECK_LE(num_params, 2);
    SArray<real_t> weights(param[0]);
    SArray<int> weight_pos;
    if (num_params == 2) weight_pos = SArray<int>(param[1]);
    SpMV::Times(data, weights, pred, nthreads_, weight_pos, {});
  }

  /*!
   * \brief compute the gradients:
   *   p = - y ./ (1 + exp (y .* pred));  grad += X' * p
   *
   * @param data the data X
   * @param param param[0]: real_t predict output;
   *              param[1] (optional): int vector of gradient positions
   * @param grad the results, should be pre-allocated
   */
  void CalcGrad(const dmlc::RowBlock<unsigned>& data,
                const std::vector<SArray<char>>& param,
                SArray<real_t>* grad) override {
    int num_params = param.size();
    CHECK_GE(num_params, 1);
    CHECK_LE(num_params, 2);
    SArray<real_t> residual;
    residual.CopyFrom(SArray<real_t>(param[0]));
    SArray<int> grad_pos;
    if (num_params == 2) grad_pos = SArray<int>(param[1]);
    CHECK_NOTNULL(data.label);
    // transform predictions into the logistic-loss derivative in place,
    // with labels mapped to {-1, +1}
    #pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < residual.size(); ++i) {
      real_t y = data.label[i] > 0 ? 1 : -1;
      residual[i] = - y / (1 + std::exp(y * residual[i]));
    }
    SpMV::TransTimes(data, residual, grad, nthreads_, {}, grad_pos);
  }
};
}
#endif
|
GB_binop__bxnor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__bxnor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__bxnor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint32)
// A*D function (colscale): GB (_AxD__bxnor_uint32)
// D*A function (rowscale): GB (_DxB__bxnor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint32)
// C=scalar+B GB (_bind1st__bxnor_uint32)
// C=scalar+B' GB (_bind1st_tran__bxnor_uint32)
// C=A+scalar GB (_bind2nd__bxnor_uint32)
// C=A'+scalar GB (_bind2nd_tran__bxnor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ~((aij) ^ (bij))
// Type/operator configuration consumed by the template files included
// below; each macro specializes the generic kernels for BXNOR on uint32.
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: bitwise XNOR
#define GB_BINOP(z, x, y, i, j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT32 || GxB_NO_BXNOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// NOTE(review): the function bodies below are supplied by the #included
// template files, specialized by the GB_* macros defined above.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BXNOR is not in that list, so this variant is compiled out.)
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxnor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above already returned; harmless
// generated boilerplate.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C->x; the template writes the scaled values into it
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C->x; the template writes the scaled values into it
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// NOTE(review): four eWiseMult variants follow, chosen by the caller based
// on the sparsity structures of C, M, A and B.
GrB_Info GB (_AemultB_01__bxnor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (GB_BINOP_FLIP is 0 for BXNOR, so this branch is the one compiled.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxnor_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
// entries absent from the bitmap Bb are skipped
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxnor_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
// entries absent from the bitmap Ab are skipped
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): restores GB_ATYPE for the remainder of the file
// (generated boilerplate; same type either way for this operator)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lor_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_fp64)
// A*D function (colscale): GB (_AxD__lor_fp64)
// D*A function (rowscale): GB (_DxB__lor_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_fp64)
// C=scalar+B GB (_bind1st__lor_fp64)
// C=scalar+B' GB (_bind1st_tran__lor_fp64)
// C=A+scalar GB (_bind2nd__lor_fp64)
// C=A'+scalar GB (_bind2nd_tran__lor_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: logical OR of the two fp64 operands (nonzero == true)
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_FP64 || GxB_NO_LOR_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LOR is not in that list, so no dense ewise3-accum kernel is generated for
// this operator (see the "(none)" entry in the function list above); the
// stub is kept for uniformity with other generated files but never compiled.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the elementwise loop is supplied
// by GB_dense_ewise3_noaccum_template.c using the LOR operator defined by
// GB_BINOP above.  Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the LOR
// operator.  B_ek_slicing/B_ntasks/B_nthreads describe B's parallel
// partition; the loop body comes from GB_dense_subassign_23_template.c.
GrB_Info GB (_Cdense_accumB__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type double, passed via p_bwork) into the
// dense matrix C with the LOR operator.  The loop body is supplied by
// GB_dense_subassign_22_template.c.  Returns GrB_NO_VALUE when this kernel
// is compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumb__lor_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: a second, unreachable "return (GrB_SUCCESS) ;" used to follow the
// block above; the inner return is unconditional, so it has been removed.
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using the LOR
// operator.  A_ek_slicing/A_ntasks/A_nthreads describe A's parallel
// partition; the loop body comes from GB_AxB_colscale_meta.c.
GrB_Info GB (_AxD__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes results directly into C's value array
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using the LOR
// operator; the loop body comes from GB_AxB_rowscale_meta.c.
GrB_Info GB (_DxB__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes results directly into C's value array
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the LOR operator, for any mask
// (structural or valued, complemented or not).  C_to_M/C_to_A/C_to_B map C's
// vectors to those of M/A/B; TaskList partitions the work.  The numeric
// phase is supplied by GB_add_template.c.
GrB_Info GB (_AaddB__lor_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, allocated on demand by the template and
// released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the LOR operator (general case);
// the numeric phase is supplied by GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__lor_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full, using
// the LOR operator.  The flipxy handling below is resolved at compile time:
// GB_BINOP_FLIP is 0 for LOR (it is commutative), so only the unflipped
// branch is compiled.
GrB_Info GB (_AemultB_02__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are
// bitmap/full, using the LOR operator.  M_ek_slicing/M_ntasks/M_nthreads
// describe M's parallel partition; the loop body comes from
// GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, using the
// LOR operator; the loop body comes from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__lor_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LOR operator with the scalar x bound to the
// first argument, over all anz entries of B.  Bb is B's bitmap (may be NULL
// for a full matrix — see the GBB test); entries absent from the bitmap are
// skipped.  Cx and Bx may alias.
GrB_Info GB (_bind1st__lor_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LOR operator with the scalar y bound to the
// second argument, over all anz entries of A.  Ab is A's bitmap (may be NULL
// for a full matrix — see the GBB test).  Cx and Ax may alias.
GrB_Info GB (_bind2nd__lor_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with the scalar x bound to the
// first argument; Workspaces/A_slice describe the parallel partition of A.
GrB_Info GB (_bind1st_tran__lor_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unbox the bound scalar x before expanding the transpose template
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent template expansions in this file
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with the scalar y bound to the
// second argument; Workspaces/A_slice describe the parallel partition of A.
GrB_Info GB (_bind2nd_tran__lor_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// unbox the bound scalar y before expanding the transpose template
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e, semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef enum
{
ExtractChannelOp,
AssignChannelOp,
ExchangeChannelOp,
TransferChannelOp
} ChannelFx;
/*
  ChannelImage(): copy source_channel of source_image into
  destination_channel of destination_image row by row (or, for
  AssignChannelOp, store the constant 'pixel' into the destination channel).
  Only the overlapping region (min of rows, min of columns) is processed.
  Returns MagickTrue on success, MagickFalse if any row could not be read,
  written, or synced.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
const PixelChannel destination_channel,const ChannelFx channel_op,
const Image *source_image,const PixelChannel source_channel,
const Quantum pixel,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
size_t
height,
width;
ssize_t
y;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
destination_view=AcquireAuthenticCacheView(destination_image,exception);
/* clamp to the region both images share */
height=MagickMin(source_image->rows,destination_image->rows);
width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelTrait
destination_traits,
source_traits;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/* a failure in any thread aborts the remaining rows */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(destination_view,0,y,
destination_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
destination_traits=GetPixelChannelTraits(destination_image,
destination_channel);
source_traits=GetPixelChannelTraits(source_image,source_channel);
/* silently skip rows when either channel is not defined for its image */
if ((destination_traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
for (x=0; x < (ssize_t) width; x++)
{
if (channel_op == AssignChannelOp)
SetPixelChannel(destination_image,destination_channel,pixel,q);
else
SetPixelChannel(destination_image,destination_channel,
GetPixelChannel(source_image,source_channel,p),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(destination_image);
}
if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
/*
  ChannelFxImage(): parse the channel expression (syntax documented in the
  method header above) token by token and apply each operation with
  ChannelImage().  ';' starts a new output image cloned from the current
  source; '|' advances to the next source image in the list.  Returns the
  first image of the resulting list, or NULL on failure.
*/
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define ChannelFxImageTag "ChannelFx/Image"
ChannelFx
channel_op;
ChannelType
channel_mask;
char
token[MagickPathExtent];
const char
*p;
const Image
*source_image;
double
pixel;
Image
*destination_image;
MagickBooleanType
status;
PixelChannel
source_channel,
destination_channel;
ssize_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
source_image=image;
destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
if (destination_image == (Image *) NULL)
return((Image *) NULL);
/* a NULL expression is a no-op: return the unmodified clone */
if (expression == (const char *) NULL)
return(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
destination_image=GetLastImageInList(destination_image);
return((Image *) NULL);
}
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
pixel=0.0;
p=(char *) expression;
(void) GetNextToken(p,&p,MagickPathExtent,token);
channel_op=ExtractChannelOp;
for (channels=0; *token != '\0'; )
{
ssize_t
i;
/*
Interpret channel expression.
*/
switch (*token)
{
case ',':
{
/* ',' merely separates channels of the current output image */
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case '|':
{
/* '|' advances to the next source image (wrapping to the first) */
if (GetNextImageInList(source_image) != (Image *) NULL)
source_image=GetNextImageInList(source_image);
else
source_image=GetFirstImageInList(source_image);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case ';':
{
Image
*canvas;
/*
  ';' finalizes the current output image (a lone extracted channel
  becomes grayscale) and appends a fresh canvas for the next set of
  operations.
*/
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,
exception);
}
canvas=CloneImage(source_image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
{
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
AppendImageToList(&destination_image,canvas);
destination_image=GetLastImageInList(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
destination_image=GetLastImageInList(destination_image);
return((Image *) NULL);
}
(void) GetNextToken(p,&p,MagickPathExtent,token);
channels=0;
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
break;
}
default:
break;
}
/* the current token must now be a channel name (mnemonic or numeric) */
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
source_channel=(PixelChannel) i;
channel_op=ExtractChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
/* '<' then '=' then '>' compose into "<=>" (exchange); "=>" is transfer;
   a bare '=' is assignment */
if (*token == '<')
{
channel_op=ExchangeChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '=')
{
if (channel_op != ExchangeChannelOp)
channel_op=AssignChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '>')
{
if (channel_op != ExchangeChannelOp)
channel_op=TransferChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
switch (channel_op)
{
case AssignChannelOp:
case ExchangeChannelOp:
case TransferChannelOp:
{
if (channel_op == AssignChannelOp)
/* for assignment the token is a value; the target channel stays
   the one parsed above (i is unchanged) */
pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
else
{
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
}
destination_channel=(PixelChannel) i;
/* grow the meta-channel count if the target lies beyond the current
   channel count */
if (i >= (ssize_t) GetPixelChannels(destination_image))
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
if (image->colorspace != UndefinedColorspace)
switch (destination_channel)
{
case RedPixelChannel:
case GreenPixelChannel:
case BluePixelChannel:
case BlackPixelChannel:
case IndexPixelChannel:
break;
case AlphaPixelChannel:
{
/* writing alpha activates the image's alpha trait */
destination_image->alpha_trait=BlendPixelTrait;
break;
}
case CompositeMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | CompositeMaskChannel);
break;
}
case ReadMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | ReadMaskChannel);
break;
}
case WriteMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | WriteMaskChannel);
break;
}
case MetaPixelChannel:
default:
{
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
break;
}
}
/* NOTE(review): for AssignChannelOp the token here is the value
   (e.g. "50%"), not a channel name — confirm ParseChannelOption's
   failure return is the intended mask contribution in that case */
channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
if (((channels >= 1) || (destination_channel >= 1)) &&
(IsGrayColorspace(destination_image->colorspace) != MagickFalse))
(void) SetImageColorspace(destination_image,sRGBColorspace,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
default:
break;
}
status=ChannelImage(destination_image,destination_channel,channel_op,
source_image,source_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
/* an exchange is two transfers: also copy destination back to source */
if (channel_op == ExchangeChannelOp)
{
status=ChannelImage(destination_image,source_channel,channel_op,
source_image,destination_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
}
switch (channel_op)
{
case ExtractChannelOp:
{
/* extraction fills output channels in order: advance the target */
channel_mask=(ChannelType) (channel_mask |
(1UL << destination_channel));
destination_channel=(PixelChannel) (destination_channel+1);
break;
}
default:
break;
}
status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
strlen(expression));
if (status == MagickFalse)
break;
}
/* finalize the last output image, as the ';' case does mid-expression */
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,exception);
}
return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CombineImages(): build one image whose channels are taken, in order, from
  the grayscale intensity of successive images in the input list (image 1 ->
  first channel, image 2 -> second, ...).  The combined image is cloned from
  the first input; its colorspace comes from the 'colorspace' argument (or is
  inferred from gamma when UndefinedColorspace).  Returns the combined image,
  or NULL on failure.
*/
MagickExport Image *CombineImages(const Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"
CacheView
*combine_view;
Image
*combine_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Ensure the images are the same size.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
combine_image=CloneImage(image,0,0,MagickTrue,exception);
if (combine_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
{
combine_image=DestroyImage(combine_image);
return((Image *) NULL);
}
if (colorspace != UndefinedColorspace)
(void) SetImageColorspace(combine_image,colorspace,exception);
else
/* gamma ~= 1.0 implies linear RGB; otherwise assume sRGB */
if (fabs(image->gamma-1.0) <= MagickEpsilon)
(void) SetImageColorspace(combine_image,RGBColorspace,exception);
else
(void) SetImageColorspace(combine_image,sRGBColorspace,exception);
/* extra input images beyond the colorspace's channel count feed alpha */
switch (combine_image->colorspace)
{
case UndefinedColorspace:
case sRGBColorspace:
{
if (GetImageListLength(image) > 3)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
if (GetImageListLength(image) > 1)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
case CMYKColorspace:
{
if (GetImageListLength(image) > 4)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
default:
break;
}
/*
Combine images.
*/
status=MagickTrue;
progress=0;
combine_view=AcquireAuthenticCacheView(combine_image,exception);
for (y=0; y < (ssize_t) combine_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
Quantum
*pixels;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
i;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
1,exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/* walk the input list: one source image per defined output channel */
next=image;
for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
{
register ssize_t
x;
PixelChannel channel = GetPixelChannelChannel(combine_image,i);
PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (next == (Image *) NULL)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
/* columns beyond the source width keep the cloned pixel value */
if (x < (ssize_t) next->columns)
{
q[i]=GetPixelGray(next,p);
p+=GetPixelChannels(next);
}
q+=GetPixelChannels(combine_image);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* NOTE(review): this row loop has no enclosing omp parallel region, so
   the atomic below appears to be a harmless leftover — confirm */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CombineImageTag,progress,
combine_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
combine_view=DestroyCacheView(combine_view);
if (status == MagickFalse)
combine_image=DestroyImage(combine_image);
return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetImageAlphaChannel(): report whether the image's alpha channel is
  active, i.e. whether image->alpha_trait is set to anything other than
  UndefinedPixelTrait.
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SeparateImage(): extract the channels selected by 'channel_type' into a new
  grayscale image.  For each pixel, every selected channel is written in turn
  to the gray channel, so when several bits are set the last selected channel
  wins.  Returns the separated image, or NULL on failure.
*/
MagickExport Image *SeparateImage(const Image *image,
const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
CacheView
*image_view,
*separate_view;
Image
*separate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize separate image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
separate_image=CloneImage(image,0,0,MagickTrue,exception);
if (separate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
{
separate_image=DestroyImage(separate_image);
return((Image *) NULL);
}
/* result is pure grayscale: drop alpha, keep the source gamma */
separate_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(separate_image,GRAYColorspace,exception);
separate_image->gamma=image->gamma;
/*
Separate image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/* a failure in any thread aborts the remaining rows */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
/* default to black, then overwrite with each selected channel */
SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(GetChannelBit(channel_type,channel) == 0))
continue;
SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(separate_image);
}
if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
separate_view=DestroyCacheView(separate_view);
image_view=DestroyCacheView(image_view);
(void) SetImageChannelMask(separate_image,DefaultChannels);
if (status == MagickFalse)
separate_image=DestroyImage(separate_image);
return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *channel_image,
    *separated;

  ssize_t
    j;

  /*
    Walk every pixel channel of the image and extract one grayscale image per
    channel flagged for update; when no channel qualifies, fall back to a
    single separation over the undefined (composite) channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  separated=NewImageList();
  for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
  {
    PixelChannel
      channel;

    PixelTrait
      traits;

    channel=GetPixelChannelChannel(image,j);
    traits=GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    channel_image=SeparateImage(image,(ChannelType) (1UL << channel),
      exception);
    if (channel_image != (Image *) NULL)
      AppendImageToList(&separated,channel_image);
  }
  if (separated == (Image *) NULL)
    separated=SeparateImage(image,UndefinedChannel,exception);
  return(separated);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  ssize_t
    i;

  /*
    Compose pixel p (with alpha) over pixel q (with beta) using the standard
    "over" operator; the blended result is written channel-by-channel into
    composite.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;
  gamma=Sa*(-Da)+Sa+Da;  /* resulting alpha: Sa+Da*(1-Sa) */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      source;

    PixelChannel
      channel;

    PixelTrait
      traits;

    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        source=(double) p->red;
        break;
      }
      case GreenPixelChannel:
      {
        source=(double) p->green;
        break;
      }
      case BluePixelChannel:
      {
        source=(double) p->blue;
        break;
      }
      case BlackPixelChannel:
      {
        source=(double) p->black;
        break;
      }
      case AlphaPixelChannel:
      {
        /*
          Alpha composes directly as Sa+Da*(1-Sa); no gamma normalization.
        */
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        continue;
      }
      default:
        continue;  /* other channels are left untouched */
    }
    composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,source,
      alpha));
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Activate, deactivate, reset, or set the alpha channel of the image as
    selected by alpha_type; returns MagickTrue on success.  Note several
    cases return directly from inside the switch and therefore skip the
    channel-mask/cache sync at the bottom of this function.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /*
        Enable blending with the existing alpha values.
      */
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply every updatable non-alpha channel by the
        pixel's normalized alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);  /* early return: skips mask reset/cache sync below */
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set fully transparent pixels to the background color while keeping
        their alpha transparent.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);  /* early return: skips mask reset/cache sync below */
    }
    case CopyAlphaChannel:
    {
      /*
        Derive alpha via a self-composite with IntensityCompositeOp
        (pixel intensity becomes the alpha value).
      */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      /*
        Keep the alpha values but stop blending with them.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha: divide every updatable non-alpha channel by the
        pixel's normalized alpha (inverse of AssociateAlphaChannel).
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          Sa=QuantumScale*GetPixelAlpha(image,q);
          gamma=PerceptibleReciprocal(Sa);  /* safe reciprocal near zero */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);  /* early return: skips mask reset/cache sync below */
    }
    case DiscreteAlphaChannel:
    {
      /*
        Enable the alpha channel without blending semantics.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      /*
        Self-composite with AlphaCompositeOp, then drop the alpha channel.
      */
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      /*
        Disable the alpha channel entirely (values are not modified).
      */
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      /*
        Enable alpha blending, initializing alpha to opaque if absent.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency: composite each pixel over the background color
        in place (FlattenPixelInfo writes its result back into q).
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      /*
        Create an opaque alpha channel only if none exists yet.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      PixelInfo
        background;

      /*
        Remove transparency: every pixel becomes the background color shaped
        by its intensity (the intensity is stored as the pixel's alpha).
      */
      ConformPixelInfo(image,&image->background_color,&background,exception);
      background.alpha_trait=BlendPixelTrait;
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=background;  /* per-row copy; only alpha varies per pixel */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      /*
        Force pixels fully transparent where the write mask permits it.
      */
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            SetPixelAlpha(image,TransparentAlpha,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
|
team.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc
// Compilation fails for icc
// XFAIL: icc
#include "callback.h"
int main() {
  /* Offload a teams region with exactly one team and one thread so the
     OMPT callback sequence verified by the FileCheck lines below is
     deterministic. */
#pragma omp target teams num_teams(1) thread_limit(1)
  { printf("In teams\n"); }
  return 0;
}
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK:[0-9]+]], {{.*}}, index=1
// CHECK: {{^}}[[MASTER]]: ompt_event_teams_begin:
// CHECK-SAME: parent_task_id=[[INIT_TASK]]
// CHECK-SAME: {{.*}} requested_num_teams=1
// CHECK-SAME: {{.*}} invoker=[[TEAMS_FLAGS:[0-9]+]]
// initial task in the teams construct starts
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_begin:
// CHECK-SAME: task_id=[[INIT_TASK_0:[0-9]+]], actual_parallelism=1, index=0
// parallel region forked by runtime
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_begin:
// CHECK-SAME: {{.*}} parent_task_id=[[INIT_TASK_0]]
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0:[0-9]+]]
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_begin:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[IMPL_TASK_0:[0-9]+]]
// CHECK: {{^}}[[MASTER]]: ompt_event_implicit_task_end:
// CHECK-SAME: {{.*}} parallel_id={{[0-9]+}}, task_id=[[IMPL_TASK_0]]
// CHECK: {{^}}[[MASTER]]: ompt_event_parallel_end:
// CHECK-SAME: {{.*}} parallel_id=[[PAR_0]], task_id=[[INIT_TASK_0]]
// initial task in the teams construct ends
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK_0]], actual_parallelism=0, index=0
// CHECK: {{^}}[[MASTER]]: ompt_event_teams_end:
// CHECK-SAME: {{.*}} task_id=[[INIT_TASK]], invoker=[[TEAMS_FLAGS]]
// CHECK: {{^}}[[MASTER]]: ompt_event_initial_task_end:
// CHECK-SAME: task_id=[[INIT_TASK]], {{.*}}, index=1
|
par_csr_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvec
*--------------------------------------------------------------------------*/
// y = alpha*A*x + beta*b
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex       alpha,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector    *x,
                                    HYPRE_Complex       beta,
                                    hypre_ParVector    *b,
                                    hypre_ParVector    *y )
{
   /*
      Computes y = alpha*A*x + beta*b for a distributed (diag + offd) CSR
      matrix.  The halo exchange of off-processor x entries is overlapped
      with the local diag matvec; the offd contribution is added afterwards.
      The return value is an informational size-compatibility code (0, 11,
      12, or 13) -- it never aborts the computation.
   */
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *x_tmp;
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;
   HYPRE_Int vecstride = hypre_VectorVectorStride( x_local );
   HYPRE_Int idxstride = hypre_VectorIndexStride( x_local );
   HYPRE_Complex *x_tmp_data, **x_buf_data;
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
   /* Temporarily disable the per-call stream sync; restored before return. */
   HYPRE_Int sync_stream = hypre_HandleCudaComputeStreamSync(hypre_handle());
   hypre_HandleCudaComputeStreamSync(hypre_handle()) = 0;
#endif

   /*---------------------------------------------------------------------
    * Check for size compatibility. ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert( idxstride>0 );
   if (num_cols != x_size)
   {
      ierr = 11;
   }
   if (num_rows != y_size || num_rows != b_size)
   {
      ierr = 12;
   }
   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
   {
      ierr = 13;
   }
   hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );

   /* x_tmp receives the off-processor x entries (num_cols_offd per vector) */
   if ( num_vectors == 1 )
   {
      x_tmp = hypre_SeqVectorCreate( num_cols_offd );
   }
   else
   {
      hypre_assert( num_vectors > 1 );
      x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
   hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   /* Persistent communication is only used for the single-vector case. */
   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
   }

   /* x_tmp */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
   /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */
   if (num_vectors == 1)
   {
      if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
      {
         /* hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); */
         hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE);
      }
      hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);  /* comm_pkg owns the buffer */
   }
#else
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* persistent handle supplies the recv buffer; x_tmp just views it */
      hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
   }
#endif
   hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE);
   x_tmp_data = hypre_VectorData(x_tmp);

   /* x_buf_data: one send buffer per vector */
   x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);

   for (jv = 0; jv < num_vectors; ++jv)
   {
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
      if (jv == 0)
      {
         if (!hypre_ParCSRCommPkgBufData(comm_pkg))
         {
            /*
            hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
                                                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                HYPRE_MEMORY_DEVICE);
            */
            hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
                                                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                 hypre_MEMORY_DEVICE);
         }
         x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
         continue;
      }
#endif
      if (use_persistent_comm)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         /* index 0 is safe here: use_persistent_comm implies num_vectors==1 */
         x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
         continue;
#endif
      }
      x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
                                    hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                    HYPRE_MEMORY_DEVICE);
   }

   /* The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more generally,
      at least for 'row' storage. This in turn, means either change CommPkg so
      num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
      a stride in the logic of CommHandleCreate (stride either from a new arg or
      a new variable inside CommPkg).  Or put the num_vector iteration inside
      CommHandleCreate (perhaps a new multivector variant of it).
   */
   hypre_assert( idxstride == 1 );

   //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE);

   /* send_map_elmts on device */
   hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);

   /* Pack the send buffers: gather x entries listed in the send map. */
   for (jv = 0; jv < num_vectors; ++jv)
   {
      HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv];
      HYPRE_Complex *locl_data = x_local_data + jv * vecstride;

      /* if on device, no need to Sync: send_data is on device memory */
#if defined(HYPRE_USING_CUDA)
      /* pack send data on device */
      HYPRE_THRUST_CALL( gather,
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) +
                         hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                         locl_data,
                         send_data );
#elif defined(HYPRE_USING_DEVICE_OPENMP)
      /* pack send data on device */
      HYPRE_Int i;
      HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
      HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts)
      for (i = start; i < end; i++)
      {
         send_data[i] = locl_data[device_send_map_elmts[i]];
      }
#else
      HYPRE_Int i;
      /* pack send data on host */
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
           i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
           i ++)
      {
         send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
      }
#endif
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication starts */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv],
                                                            HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* overlapped local computation: y_local = alpha*diag*x_local + beta*b_local */
   hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication ends */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* computation offd part: y_local += alpha*offd*x_tmp (accumulated, beta=1) */
   if (num_cols_offd)
   {
      hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local );
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   hypre_SeqVectorDestroy(x_tmp);  x_tmp = NULL;

   /* free per-vector send buffers (jv==0 on device is owned by comm_pkg) */
   if (!use_persistent_comm)
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
   /* restore the caller's stream-sync setting and synchronize once */
   hypre_HandleCudaComputeStreamSync(hypre_handle()) = sync_stream;
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   return ierr;
}
HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex       alpha,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector    *x,
                          HYPRE_Complex       beta,
                          hypre_ParVector    *y )
{
   /*
      In-place product y <- alpha*A*x + beta*y: delegate to the out-of-place
      kernel with y acting as both the additive input b and the output.
   */
   HYPRE_Int ierr;

   ierr = hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);

   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y )
{
hypre_ParCSRCommHandle **comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);
hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
hypre_Vector *y_tmp;
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, jv;
HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
HYPRE_Int idxstride = hypre_VectorIndexStride(y_local);
HYPRE_Complex *y_tmp_data, **y_buf_data;
HYPRE_Complex *y_local_data = hypre_VectorData(y_local);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_Int sync_stream = hypre_HandleCudaComputeStreamSync(hypre_handle());
hypre_HandleCudaComputeStreamSync(hypre_handle()) = 0;
#endif
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_rows != x_size)
{
ierr = 1;
}
if (num_cols != y_size)
{
ierr = 2;
}
if (num_rows != x_size && num_cols != y_size)
{
ierr = 3;
}
hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors );
hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
if ( num_vectors == 1 )
{
y_tmp = hypre_SeqVectorCreate(num_cols_offd);
}
else
{
hypre_assert( num_vectors > 1 );
y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors);
}
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
use_persistent_comm = num_vectors == 1;
// JSP TODO: we can use persistent communication for multi-vectors,
// but then we need different communication handles for different
// num_vectors.
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
#endif
}
else
{
comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
}
/* y_tmp */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */
if (num_vectors == 1)
{
if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
{
//hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE);
}
hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
}
#else
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
}
#endif
hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE);
y_tmp_data = hypre_VectorData(y_tmp);
/* y_buf_data */
y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
for (jv = 0; jv < num_vectors; ++jv)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
if (!hypre_ParCSRCommPkgBufData(comm_pkg))
{
/*
hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
*/
hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
hypre_MEMORY_DEVICE);
}
y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
continue;
}
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
continue;
#endif
}
y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
if (num_cols_offd)
{
if (offdT)
{
// offdT is optional. Used only if it's present
hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp);
}
else
{
hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
/* this is where we assume multivectors are 'column' storage */
comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd],
HYPRE_MEMORY_DEVICE, y_buf_data[jv] );
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* overlapped local computation */
if (diagT)
{
// diagT is optional. Used only if it's present.
hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local);
}
else
{
hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication ends */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
comm_handle[jv] = NULL;
}
hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
/* The assert is because the following loop only works for 'column'
storage of a multivector. This needs to be fixed to work more generally,
at least for 'row' storage. This in turn, means either change CommPkg so
num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
a stride in the logic of CommHandleCreate (stride either from a new arg or
a new variable inside CommPkg). Or put the num_vector iteration inside
CommHandleCreate (perhaps a new multivector variant of it).
*/
hypre_assert( idxstride == 1 );
/* send_map_elmts on device */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
for (jv = 0; jv < num_vectors; ++jv)
{
HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv];
HYPRE_Complex *locl_data = y_local_data + jv * vecstride;
#if defined(HYPRE_USING_CUDA)
/* unpack recv data on device */
if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg))
{
hypre_ParCSRCommPkgWorkSpace(comm_pkg) =
hypre_TAlloc( char,
(2*sizeof(HYPRE_Int)+sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE );
}
hypreDevice_GenScatterAdd(locl_data,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
recv_data,
hypre_ParCSRCommPkgWorkSpace(comm_pkg));
#elif defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_Int i, j;
/* unpack recv data on device */
for (i = 0; i < num_sends; i++)
{
HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
#pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts)
for (j = start; j < end; j++)
{
locl_data[device_send_map_elmts[j]] += recv_data[j];
}
}
#else
HYPRE_Int i;
/* unpack recv data on host, TODO OMP? */
for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
i ++)
{
locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i];
}
#endif
}
hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL;
if (!use_persistent_comm)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
continue;
}
#endif
hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE);
}
hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_HandleCudaComputeStreamSync(hypre_handle()) = sync_stream;
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec_FF
 *
 * Computes y = alpha*A*x + beta*y restricted to "fine" points: the
 * sequential kernel hypre_CSRMatrixMatvec_FF only combines entries whose
 * row/column CF_marker equals fpt.  A second halo exchange ships the CF
 * markers of the sent entries so off-process columns can be classified.
 *
 * Returns an informational code: 11 if the global length of x does not match
 * the number of columns of A, 12 if the length of y does not match the
 * number of rows, 13 if both; processing continues regardless.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha,
                             hypre_ParCSRMatrix *A,
                             hypre_ParVector *x,
                             HYPRE_Complex beta,
                             hypre_ParVector *y,
                             HYPRE_Int *CF_marker,
                             HYPRE_Int fpt )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   /* x_tmp is only created when num_cols_offd != 0 but was previously
    * destroyed unconditionally below while still indeterminate (undefined
    * behavior); initialize to NULL and guard the destroy. */
   hypre_Vector *x_tmp = NULL;
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, i, j, index, start, num_procs;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Complex *x_tmp_data = NULL;
   HYPRE_Complex *x_buf_data = NULL;
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_MPI_Comm_size(comm, &num_procs);
   if (num_cols != x_size)
      ierr = 11;
   if (num_rows != y_size)
      ierr = 12;
   if (num_cols != x_size && num_rows != y_size)
      ierr = 13;
   if (num_procs > 1)
   {
      if (num_cols_offd)
      {
         x_tmp = hypre_SeqVectorCreate( num_cols_offd );
         hypre_SeqVectorInitialize(x_tmp);
         x_tmp_data = hypre_VectorData(x_tmp);
      }
      /*---------------------------------------------------------------------
       * If there exists no CommPkg for A, a CommPkg is generated using
       * equally load balanced partitionings
       *--------------------------------------------------------------------*/
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (num_sends)
         x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
                                    (comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      /* Pack the local x entries this rank must send to its neighbors. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            x_buf_data[index++]
               = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle =
         hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data );
   }
   /* Diagonal-block product, overlapped with the nonblocking halo exchange. */
   hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker,
                             CF_marker, fpt);
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      /* Second exchange: send the CF markers of the same entries so the
       * receiver can classify its off-process columns. */
      if (num_sends)
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart
                                      (comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle =
         hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd );
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      /* Off-diagonal product accumulates into y (beta = 1.0). */
      if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local,
                                                   CF_marker, CF_marker_offd, fpt);
      if (x_tmp)
      {
         hypre_SeqVectorDestroy(x_tmp);
         x_tmp = NULL;
      }
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   }
   return ierr;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 *
 * Y is normalized in place so that the microsecond fields can be subtracted
 * without going negative (callers should not rely on *y afterwards).
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microsecond field so that the later
     * usec subtraction cannot underflow. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Conversely, carry any surplus microseconds back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec of the result is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x precedes the (normalized) y. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
helpme_standalone.h |
//
// WARNING! This file is automatically generated from the sources in the src directory.
// Do not modify this source code directly as any changes will be overwritten
//
// original file: ../src/helpme.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_HELPME_H_
#define _HELPME_STANDALONE_HELPME_H_
#if __cplusplus || DOXYGEN
// C++ header
#include <algorithm>
#include <array>
#include <cmath>
#include <complex>
#include <functional>
#include <iostream>
#include <list>
#include <memory>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <set>
#include <stdexcept>
#include <string>
#include <tuple>
#include <unistd.h>
#include <vector>
// original file: ../src/cartesiantransform.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_CARTESIANTRANSFORM_H_
#define _HELPME_STANDALONE_CARTESIANTRANSFORM_H_
// original file: ../src/matrix.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_MATRIX_H_
#define _HELPME_STANDALONE_MATRIX_H_
#include <functional>
#include <algorithm>
#include <complex>
#include <fstream>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <iomanip>
#include <numeric>
#include <stdexcept>
#include <tuple>
// original file: ../src/lapack_wrapper.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
//
// The code for Jacobi diagonalization is taken (with minimal modification) from
//
// http://www.mymathlib.com/c_source/matrices/eigen/jacobi_cyclic_method.c
//
#ifndef _HELPME_STANDALONE_LAPACK_WRAPPER_H_
#define _HELPME_STANDALONE_LAPACK_WRAPPER_H_
#include <cmath>
#include <limits>
namespace helpme {
////////////////////////////////////////////////////////////////////////////////
// void Jacobi_Cyclic_Method //
// (Real eigenvalues[], Real *eigenvectors, Real *A, int n) //
// //
// Description: //
// Find the eigenvalues and eigenvectors of a symmetric n x n matrix A //
// using the Jacobi method. Upon return, the input matrix A will have //
// been modified. //
// The Jacobi procedure for finding the eigenvalues and eigenvectors of a //
// symmetric matrix A is based on finding a similarity transformation //
// which diagonalizes A. The similarity transformation is given by a //
// product of a sequence of orthogonal (rotation) matrices each of which //
// annihilates an off-diagonal element and its transpose. The rotation //
// effects only the rows and columns containing the off-diagonal element //
// and its transpose, i.e. if a[i][j] is an off-diagonal element, then //
// the orthogonal transformation rotates rows a[i][] and a[j][], and //
// equivalently it rotates columns a[][i] and a[][j], so that a[i][j] = 0 //
// and a[j][i] = 0. //
// The cyclic Jacobi method considers the off-diagonal elements in the //
// following order: (0,1),(0,2),...,(0,n-1),(1,2),...,(n-2,n-1). If the //
// the magnitude of the off-diagonal element is greater than a treshold, //
// then a rotation is performed to annihilate that off-diagnonal element. //
// The process described above is called a sweep. After a sweep has been //
// completed, the threshold is lowered and another sweep is performed //
// with the new threshold. This process is completed until the final //
// sweep is performed with the final threshold. //
// The orthogonal transformation which annihilates the matrix element //
// a[k][m], k != m, is Q = q[i][j], where q[i][j] = 0 if i != j, i,j != k //
// i,j != m and q[i][j] = 1 if i = j, i,j != k, i,j != m, q[k][k] = //
// q[m][m] = cos(phi), q[k][m] = -sin(phi), and q[m][k] = sin(phi), where //
// the angle phi is determined by requiring a[k][m] -> 0. This condition //
// on the angle phi is equivalent to //
// cot(2 phi) = 0.5 * (a[k][k] - a[m][m]) / a[k][m] //
// Since tan(2 phi) = 2 tan(phi) / (1 - tan(phi)^2), //
// tan(phi)^2 + 2cot(2 phi) * tan(phi) - 1 = 0. //
// Solving for tan(phi), choosing the solution with smallest magnitude, //
// tan(phi) = - cot(2 phi) + sgn(cot(2 phi)) sqrt(cot(2phi)^2 + 1). //
// Then cos(phi)^2 = 1 / (1 + tan(phi)^2) and sin(phi)^2 = 1 - cos(phi)^2 //
// Finally by taking the sqrts and assigning the sign to the sin the same //
// as that of the tan, the orthogonal transformation Q is determined. //
// Let A" be the matrix obtained from the matrix A by applying the //
// similarity transformation Q, since Q is orthogonal, A" = Q'AQ, where Q'//
// is the transpose of Q (which is the same as the inverse of Q). Then //
// a"[i][j] = Q'[i][p] a[p][q] Q[q][j] = Q[p][i] a[p][q] Q[q][j], //
// where repeated indices are summed over. //
// If i is not equal to either k or m, then Q[i][j] is the Kronecker //
// delta. So if both i and j are not equal to either k or m, //
// a"[i][j] = a[i][j]. //
// If i = k, j = k, //
// a"[k][k] = //
// a[k][k]*cos(phi)^2 + a[k][m]*sin(2 phi) + a[m][m]*sin(phi)^2 //
// If i = k, j = m, //
// a"[k][m] = a"[m][k] = 0 = //
// a[k][m]*cos(2 phi) + 0.5 * (a[m][m] - a[k][k])*sin(2 phi) //
// If i = k, j != k or m, //
// a"[k][j] = a"[j][k] = a[k][j] * cos(phi) + a[m][j] * sin(phi) //
// If i = m, j = k, a"[m][k] = 0 //
// If i = m, j = m, //
// a"[m][m] = //
// a[m][m]*cos(phi)^2 - a[k][m]*sin(2 phi) + a[k][k]*sin(phi)^2 //
// If i= m, j != k or m, //
// a"[m][j] = a"[j][m] = a[m][j] * cos(phi) - a[k][j] * sin(phi) //
// //
// If X is the matrix of normalized eigenvectors stored so that the ith //
// column corresponds to the ith eigenvalue, then AX = X Lamda, where //
// Lambda is the diagonal matrix with the ith eigenvalue stored at //
// Lambda[i][i], i.e. X'AX = Lambda and X is orthogonal, the eigenvectors //
// are normalized and orthogonal. So, X = Q1 Q2 ... Qs, where Qi is //
// the ith orthogonal matrix, i.e. X can be recursively approximated by //
// the recursion relation X" = X Q, where Q is the orthogonal matrix and //
// the initial estimate for X is the identity matrix. //
// If j = k, then x"[i][k] = x[i][k] * cos(phi) + x[i][m] * sin(phi), //
// if j = m, then x"[i][m] = x[i][m] * cos(phi) - x[i][k] * sin(phi), and //
// if j != k and j != m, then x"[i][j] = x[i][j]. //
// //
// Arguments: //
// Real eigenvalues //
// Array of dimension n, which upon return contains the eigenvalues of //
// the matrix A. //
// Real* eigenvectors //
// Matrix of eigenvectors, the ith column of which contains an //
// eigenvector corresponding to the ith eigenvalue in the array //
// eigenvalues. //
// Real* A //
// Pointer to the first element of the symmetric n x n matrix A. The //
// input matrix A is modified during the process. //
// int n //
// The dimension of the array eigenvalues, number of columns and rows //
// of the matrices eigenvectors and A. //
// //
// Return Values: //
// Function is of type void. //
// //
// Example: //
// #define N //
// Real A[N][N], Real eigenvalues[N], Real eigenvectors[N][N] //
// //
// (your code to initialize the matrix A ) //
// //
// JacobiCyclicDiagonalization(eigenvalues, (Real*)eigenvectors, //
// (Real *) A, N); //
////////////////////////////////////////////////////////////////////////////////
// Cyclic Jacobi diagonalization of a symmetric n x n matrix (see the large
// comment block above for the full derivation of the rotation formulas).
//
// eigenvalues  : length-n output array receiving the eigenvalues.
// eigenvectors : n x n output matrix; column i holds the normalized
//                eigenvector corresponding to eigenvalues[i].
// A            : the symmetric input matrix.  NOTE: despite the const
//                qualifier it is modified in place via const_cast below;
//                on return its diagonal holds the eigenvalues.
// n            : the matrix dimension.
template <typename Real>
void JacobiCyclicDiagonalization(Real *eigenvalues, Real *eigenvectors, const Real *A, int n) {
    int i, j, k, m;
    Real *pAk, *pAm, *p_r, *p_e;
    Real threshold_norm;
    Real threshold;
    Real tan_phi, sin_phi, cos_phi, tan2_phi, sin2_phi, cos2_phi;
    Real sin_2phi, cos_2phi, cot_2phi;
    Real dum1;
    Real dum2;
    Real dum3;
    Real max;
    // Take care of trivial cases
    if (n < 1) return;
    if (n == 1) {
        eigenvalues[0] = *A;
        *eigenvectors = 1;
        return;
    }
    // Initialize the eigenvector matrix to the identity.  (The historic
    // comment here said "eigenvalues", but it is eigenvectors being set.)
    for (p_e = eigenvectors, i = 0; i < n; i++)
        for (j = 0; j < n; p_e++, j++)
            if (i == j)
                *p_e = 1;
            else
                *p_e = 0;
    // Calculate the threshold and threshold_norm: threshold starts as
    // sqrt(2 * sum of squares of the strict upper triangle of A) and is
    // reduced by a factor of 10 per sweep; threshold_norm is the stopping
    // value, scaled by machine epsilon for Real.
    for (threshold = 0, pAk = const_cast<Real *>(A), i = 0; i < (n - 1); pAk += n, i++)
        for (j = i + 1; j < n; j++) threshold += *(pAk + j) * *(pAk + j);
    threshold = sqrt(threshold + threshold);
    threshold_norm = threshold * std::numeric_limits<Real>::epsilon();
    max = threshold + 1;
    while (threshold > threshold_norm) {
        threshold /= 10;
        // Skip the sweep if the largest remaining off-diagonal magnitude
        // (tracked in max) already exceeds the lowered threshold.
        if (max < threshold) continue;
        max = 0;
        // One sweep: visit off-diagonal elements (k,m), k < m, in cyclic order.
        for (pAk = const_cast<Real *>(A), k = 0; k < (n - 1); pAk += n, k++) {
            for (pAm = pAk + n, m = k + 1; m < n; pAm += n, m++) {
                if (std::abs(*(pAk + m)) < threshold) continue;
                // Calculate the sin and cos of the rotation angle which
                // annihilates A[k][m].
                cot_2phi = 0.5f * (*(pAk + k) - *(pAm + m)) / *(pAk + m);
                dum1 = sqrt(cot_2phi * cot_2phi + 1);
                if (cot_2phi < 0) dum1 = -dum1;
                // tan(phi) is the smaller-magnitude root of
                // tan^2 + 2 cot(2phi) tan - 1 = 0.
                tan_phi = -cot_2phi + dum1;
                tan2_phi = tan_phi * tan_phi;
                sin2_phi = tan2_phi / (1 + tan2_phi);
                cos2_phi = 1 - sin2_phi;
                sin_phi = sqrt(sin2_phi);
                if (tan_phi < 0) sin_phi = -sin_phi;
                cos_phi = sqrt(cos2_phi);
                sin_2phi = 2 * sin_phi * cos_phi;
                cos_2phi = cos2_phi - sin2_phi;
                // Rotate columns k and m for both the matrix A
                // and the matrix of eigenvectors.
                p_r = const_cast<Real *>(A);
                dum1 = *(pAk + k);
                dum2 = *(pAm + m);
                dum3 = *(pAk + m);
                *(pAk + k) = dum1 * cos2_phi + dum2 * sin2_phi + dum3 * sin_2phi;
                *(pAm + m) = dum1 * sin2_phi + dum2 * cos2_phi - dum3 * sin_2phi;
                *(pAk + m) = 0;
                *(pAm + k) = 0;
                // Update the remaining entries of rows/columns k and m,
                // reading from the upper triangle only (i<k / i<m branches).
                for (i = 0; i < n; p_r += n, i++) {
                    if ((i == k) || (i == m)) continue;
                    if (i < k)
                        dum1 = *(p_r + k);
                    else
                        dum1 = *(pAk + i);
                    if (i < m)
                        dum2 = *(p_r + m);
                    else
                        dum2 = *(pAm + i);
                    dum3 = dum1 * cos_phi + dum2 * sin_phi;
                    if (i < k)
                        *(p_r + k) = dum3;
                    else
                        *(pAk + i) = dum3;
                    dum3 = -dum1 * sin_phi + dum2 * cos_phi;
                    if (i < m)
                        *(p_r + m) = dum3;
                    else
                        *(pAm + i) = dum3;
                }
                // Accumulate the rotation into the eigenvector matrix:
                // X" = X Q (columns k and m only change).
                for (p_e = eigenvectors, i = 0; i < n; p_e += n, i++) {
                    dum1 = *(p_e + k);
                    dum2 = *(p_e + m);
                    *(p_e + k) = dum1 * cos_phi + dum2 * sin_phi;
                    *(p_e + m) = -dum1 * sin_phi + dum2 * cos_phi;
                }
            }
            // Refresh max with the largest off-diagonal magnitude left in row k.
            for (i = 0; i < n; i++)
                if (i == k)
                    continue;
                else if (max < std::abs(*(pAk + i)))
                    max = std::abs(*(pAk + i));
        }
    }
    // The eigenvalues are the diagonal of the (now nearly diagonal) A.
    for (pAk = const_cast<Real *>(A), k = 0; k < n; pAk += n, k++) eigenvalues[k] = *(pAk + k);
}
} // Namespace helpme
#endif // Header guard
// original file: ../src/string_utils.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_STRING_UTIL_H_
#define _HELPME_STANDALONE_STRING_UTIL_H_
#include <complex>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
namespace helpme {
/*!
 * \brief makes a string representation of a floating point number.
 * \param number the value to render.
 * \param width the width used to display the number.
 * \param precision the precision used to display the number.
 * \return the string representation of the floating point number.
 */
template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
std::string formatNumber(const T &number, int width, int precision) {
    std::stringstream formatted;
    formatted << std::fixed << std::setw(width) << std::setprecision(precision) << number;
    return formatted.str();
}
/*!
 * \brief makes a string representation of a complex number.
 * \param number the value to render.
 * \param width the width used to display the real and the imaginary components.
 * \param precision the precision used to display the real and the imaginary components.
 * \return the string representation of the complex number, as "(re, im)".
 */
template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
std::string formatNumber(const T &number, int width, int precision) {
    std::stringstream formatted;
    formatted.setf(std::ios::fixed, std::ios::floatfield);
    formatted << std::setprecision(precision);
    formatted << "(" << std::setw(width) << number.real() << ", " << std::setw(width) << number.imag() << ")";
    return formatted.str();
}
/*!
 * \brief makes a string representation of a multdimensional tensor, stored in a flat array.
 * \param data pointer to the start of the array holding the tensor information.
 * \param size the length of the array holding the tensor information.
 * \param rowDim the dimension of the fastest running index.
 * \param width the width of each individual floating point number.
 * \param precision used to display each floating point number.
 * \return the string representation of the tensor, one row per line.
 */
template <typename T>
std::string stringify(T *data, size_t size, size_t rowDim, int width = 14, int precision = 8) {
    std::stringstream out;
    for (size_t pos = 0; pos < size; ++pos) {
        out << formatNumber(data[pos], width, precision);
        // Break the line at the end of each row; otherwise separate with a space.
        out << ((pos % rowDim == rowDim - 1) ? "\n" : " ");
    }
    return out.str();
}
} // Namespace helpme
#endif // Header guard
// original file: ../src/memory.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_MEMORY_H_
#define _HELPME_STANDALONE_MEMORY_H_
#include <stdexcept>
#include <vector>
#include <fftw3.h>
namespace helpme {
/*!
 * \brief FFTWAllocator a class to handle aligned allocation of memory using the FFTW libraries.
 * Code is adapted from http://www.josuttis.com/cppcode/myalloc.hpp.html.
 * The allocator is stateless; all instances are interchangeable.
 */
template <class T>
class FFTWAllocator {
   public:
    // type definitions
    typedef T value_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef std::size_t size_type;
    typedef std::ptrdiff_t difference_type;
    // rebind allocator to type U
    template <class U>
    struct rebind {
        typedef FFTWAllocator<U> other;
    };
    // return address of values
    pointer address(reference value) const { return &value; }
    const_pointer address(const_reference value) const { return &value; }
    /* constructors and destructor
     * - nothing to do because the allocator has no state
     */
    FFTWAllocator() throw() {}
    FFTWAllocator(const FFTWAllocator&) throw() {}
    template <class U>
    FFTWAllocator(const FFTWAllocator<U>&) throw() {}
    ~FFTWAllocator() throw() {}
    // Assignment is a no-op for a stateless allocator, but the declared return
    // type is a reference: the previous versions fell off the end without
    // returning, which is undefined behavior for a value-returning function.
    FFTWAllocator& operator=(FFTWAllocator other) throw() { return *this; }
    template <class U>
    FFTWAllocator& operator=(FFTWAllocator<U> other) throw() { return *this; }
    // return maximum number of elements that can be allocated
    size_type max_size() const throw() { return std::numeric_limits<std::size_t>::max() / sizeof(T); }
    // allocate but don't initialize num elements of type T
    pointer allocate(size_type num, const void* = 0) { return static_cast<pointer>(fftw_malloc(num * sizeof(T))); }
    // initialize elements of allocated storage p with value value
    void construct(pointer p, const T& value) {
        // initialize memory with placement new
        new ((void*)p) T(value);
    }
    // destroy elements of initialized storage p.  The previous version left
    // this empty, which skipped the destructor for element types with
    // non-trivial destructors (a resource leak when used with such types).
    void destroy(pointer p) { p->~T(); }
    // deallocate storage p of deleted elements
    void deallocate(pointer p, size_type num) { fftw_free(static_cast<void*>(p)); }
};
// All specializations of this allocator are interchangeable: any instance can
// deallocate memory obtained through any other, so equality is unconditional.
template <class T1, class T2>
bool operator==(const FFTWAllocator<T1>&, const FFTWAllocator<T2>&) throw() {
    return true;
}
// Mirror of operator== above: no two instances ever compare unequal.
template <class T1, class T2>
bool operator!=(const FFTWAllocator<T1>&, const FFTWAllocator<T2>&) throw() {
    return false;
}
// Convenience alias: a std::vector whose storage comes from fftw_malloc via
// FFTWAllocator (see FFTWAllocator::allocate above).
template <typename Real>
using vector = std::vector<Real, FFTWAllocator<Real>>;
} // Namespace helpme
#endif // Header guard
namespace helpme {
/*!
 * A helper function to transpose a dense matrix in place, gratuitously stolen from
 * https://stackoverflow.com/questions/9227747/in-place-transposition-of-a-matrix
 *
 * The range [first, last) holds the matrix in row-major order; m is the row
 * length (number of columns) of the input. The permutation is applied by
 * following each cycle of the index map a -> (n*a) mod (total-1).
 */
template <class RandomIterator>
void transposeMemoryInPlace(RandomIterator first, RandomIterator last, int m) {
    const int total = last - first;   // number of elements in the matrix
    const int wrap = total - 1;       // index arithmetic is modulo (total-1)
    const int n = total / m;          // the other matrix dimension
    std::vector<bool> placed(total);  // marks indices already in final position
    for (RandomIterator cycleStart = first + 1; cycleStart != last; ++cycleStart) {
        if (placed[cycleStart - first]) continue;
        // Walk the permutation cycle beginning at cycleStart, swapping
        // elements into place until the cycle closes on itself.
        int idx = cycleStart - first;
        do {
            idx = (idx == wrap) ? wrap : (n * idx) % wrap;
            std::swap(*(first + idx), *cycleStart);
            placed[idx] = true;
        } while (first + idx != cycleStart);
    }
}
/*!
* \brief The Matrix class is designed to serve as a convenient wrapper to simplify 2D matrix operations.
* It assumes dense matrices with contiguious data and the fast running index being the right
* (column) index. The underlying memory may have already been allocated elsewhere by C, Fortran
* or Python, and is directly manipulated in place, saving an expensive copy operation. To provide
* read-only access to such memory address, use a const template type.
*/
template <typename Real>
class Matrix {
protected:
/// The number of rows in the matrix.
size_t nRows_;
/// The number of columns in the matrix.
size_t nCols_;
/// A vector to conveniently allocate data, if we really need to.
helpme::vector<Real> allocatedData_;
/// Pointer to the raw data, whose allocation may not be controlled by us.
Real* data_;
public:
    // Sort direction labels; presumably consumed by sorting helpers outside
    // this excerpt — TODO confirm against the rest of the class.
    enum class SortOrder { Ascending, Descending };
    // Element access: data_ is row-major with nCols_ as the row stride.
    inline const Real& operator()(int row, int col) const { return *(data_ + row * nCols_ + col); }
    // Element access via a (row, col) pair.
    inline const Real& operator()(const std::pair<int, int>& indices) const {
        return *(data_ + std::get<0>(indices) * nCols_ + std::get<1>(indices));
    }
    inline Real& operator()(int row, int col) { return *(data_ + row * nCols_ + col); }
    inline Real& operator()(const std::pair<int, int>& indices) {
        return *(data_ + std::get<0>(indices) * nCols_ + std::get<1>(indices));
    }
    // Row access: returns a raw pointer to the first element of the row.
    inline const Real* operator[](int row) const { return data_ + row * nCols_; }
    inline Real* operator[](int row) { return data_ + row * nCols_; }
    // Iteration over the full flattened storage (nRows_ * nCols_ elements).
    // Note these are const member functions returning mutable pointers: the
    // class wraps externally owned memory and does not enforce deep constness.
    Real* begin() const { return data_; }
    Real* end() const { return data_ + nRows_ * nCols_; }
    const Real* cbegin() const { return data_; }
    const Real* cend() const { return data_ + nRows_ * nCols_; }
    /*!
     * \brief The sliceIterator struct provides a read-only view of a sub-block of a matrix, with arbitrary size.
     *
     * NOTE(review): despite the "read-only" wording above, the compound
     * assignment operators below write through the viewed memory.  The struct
     * is a strided pointer triple (begin_, end_, stride_) that serves as both
     * a range and an iterator over that range.
     */
    struct sliceIterator {
        // View bounds [begin_, end_), visited in steps of stride_ elements;
        // ptr_ is the current iteration position.
        Real *begin_, *end_, *ptr_;
        size_t stride_;
        sliceIterator(Real* start, Real* end, size_t stride) : begin_(start), end_(end), ptr_(start), stride_(stride) {}
        // The slice is its own range: begin()/end() return fresh iterators
        // over the same memory (end() carries stride 0; only ptr_ is compared).
        sliceIterator begin() const { return sliceIterator(begin_, end_, stride_); }
        sliceIterator end() const { return sliceIterator(end_, end_, 0); }
        sliceIterator cbegin() const { return sliceIterator(begin_, end_, stride_); }
        sliceIterator cend() const { return sliceIterator(end_, end_, 0); }
        // Inequality compares only the current position, as range-for requires.
        bool operator!=(const sliceIterator& other) { return ptr_ != other.ptr_; }
        // Scale every element of the slice in place.
        sliceIterator operator*=(Real val) {
            for (auto& element : *this) element *= val;
            return *this;
        }
        // Divide every element in place (implemented as multiply by 1/val).
        sliceIterator operator/=(Real val) {
            Real invVal = 1 / val;
            for (auto& element : *this) element *= invVal;
            return *this;
        }
        // Subtract a scalar from every element in place.
        sliceIterator operator-=(Real val) {
            for (auto& element : *this) element -= val;
            return *this;
        }
        // Add a scalar to every element in place.
        sliceIterator operator+=(Real val) {
            for (auto& element : *this) element += val;
            return *this;
        }
        // Advance by one stride (pre-increment only).
        sliceIterator operator++() {
            ptr_ += stride_;
            return *this;
        }
        // NOTE(review): the index is not scaled by stride_, so for non-unit
        // strides this addresses raw memory offsets rather than slice
        // elements — confirm intended usage with callers.
        const Real& operator[](size_t index) const { return *(begin_ + index); }
        size_t size() const { return std::distance(begin_, end_) / stride_; }
        // Throws if the two slices cover a different number of elements.
        void assertSameSize(const sliceIterator& other) const {
            if (size() != other.size())
                throw std::runtime_error("Slice operations only supported for slices of the same size.");
        }
        // Throws unless the slice is contiguous (stride 1), which the
        // std::transform-based operations below require.
        void assertContiguous(const sliceIterator& iter) const {
            if (iter.stride_ != 1)
                throw std::runtime_error(
                    "Slice operations called on operation that is only allowed for contiguous data.");
        }
        // Element-wise difference of two contiguous, equal-sized slices,
        // returned as a new 1 x size() matrix.
        Matrix<Real> operator-(const sliceIterator& other) const {
            assertSameSize(other);
            assertContiguous(*this);
            assertContiguous(other);
            Matrix ret(1, size());
            std::transform(begin_, end_, other.begin_, ret[0],
                           [](const Real& a, const Real& b) -> Real { return a - b; });
            return ret;
        }
        // In-place element-wise subtraction of another contiguous slice.
        sliceIterator operator-=(const sliceIterator& other) const {
            assertSameSize(other);
            assertContiguous(*this);
            assertContiguous(other);
            std::transform(begin_, end_, other.begin_, begin_,
                           [](const Real& a, const Real& b) -> Real { return a - b; });
            return *this;
        }
        // In-place element-wise addition of another contiguous slice.
        sliceIterator operator+=(const sliceIterator& other) const {
            assertSameSize(other);
            assertContiguous(*this);
            assertContiguous(other);
            std::transform(begin_, end_, other.begin_, begin_,
                           [](const Real& a, const Real& b) -> Real { return a + b; });
            return *this;
        }
        // Dereference the current iteration position.
        Real& operator*() { return *ptr_; }
    };
/*!
 * \brief row returns an iterator over a given row (contiguous, stride 1).
 *        Note the slice is mutable, not read-only: it hands out Real& references.
 * \param r the row to return.
 * \return the slice in memory corresponding to the rth row.
 */
sliceIterator row(size_t r) const { return sliceIterator(data_ + r * nCols_, data_ + (r + 1) * nCols_, 1); }
/*!
 * \brief col returns an iterator over a given column (strided by the row length).
 * \param c the column to return.
 * \return the slice in memory corresponding to the cth column.
 */
sliceIterator col(size_t c) const { return sliceIterator(data_ + c, data_ + nRows_ * nCols_ + c, nCols_); }
/*!
 * \return the number of rows in this matrix.
 */
size_t nRows() const { return nRows_; }
/*!
 * \return the number of columns in this matrix.
 */
size_t nCols() const { return nCols_; }
/*!
 * \brief Matrix Constructs an empty (0 x 0) matrix.
 *        BUGFIX: data_ is now explicitly nulled; it was previously left indeterminate,
 *        so begin()/end() on a default-constructed matrix performed pointer arithmetic
 *        on an uninitialized pointer (undefined behavior), even though the resulting
 *        empty range was never dereferenced.
 */
Matrix() : nRows_(0), nCols_(0), data_(nullptr) {}
/*!
 * \brief Matrix Constructs a new matrix, allocating memory and zero-initializing
 *        every element (the vector is built with fill value 0).
 * \param nRows the number of rows in the matrix.
 * \param nCols the number of columns in the matrix.
 */
Matrix(size_t nRows, size_t nCols)
    : nRows_(nRows), nCols_(nCols), allocatedData_(nRows * nCols, 0), data_(allocatedData_.data()) {}
/*!
 * \brief Matrix Constructs a new matrix, allocating memory.
 * \param filename the ASCII file from which to read this matrix.  The file must begin
 *        with the number of rows and columns, followed by nRows*nCols whitespace
 *        separated values.
 * \throws std::runtime_error if the file cannot be opened, the dimension header cannot
 *         be parsed, or the amount of data disagrees with the declared dimensions.
 */
Matrix(const std::string& filename) {
    Real tmp;
    std::ifstream inFile(filename);
    if (!inFile) {
        std::string msg("Unable to open file ");
        msg += filename;
        throw std::runtime_error(msg);
    }
    // BUGFIX: verify that the dimensions were actually parsed.  Previously a malformed
    // header left nRows_/nCols_ indeterminate and the size check below compared garbage.
    if (!(inFile >> nRows_ >> nCols_)) {
        std::string msg("Unable to parse matrix dimensions from ");
        msg += filename;
        throw std::runtime_error(msg);
    }
    while (inFile >> tmp) allocatedData_.push_back(tmp);
    // No explicit close(): the ifstream destructor handles it (RAII).
    if (nRows_ * nCols_ != allocatedData_.size()) {
        allocatedData_.clear();
        std::string msg("Inconsistent dimensions in ");
        msg += filename;
        msg += ". Amount of data inconsistent with declared size.";  // typo "inconsitent" fixed
        throw std::runtime_error(msg);
    }
    allocatedData_.shrink_to_fit();
    data_ = allocatedData_.data();
}
/*!
 * \brief Matrix Constructs a new matrix, allocating memory and filling it from a braced
 *        initializer of rows.
 * \param data a braced initializer list of braced initializer lists (one per row); all
 *        rows must have the same length.
 * \throws std::runtime_error if the rows have inconsistent lengths.
 */
Matrix(std::initializer_list<std::initializer_list<Real>> data) {
    nRows_ = data.size();
    nCols_ = (nRows_ > 0) ? data.begin()->size() : 0;
    allocatedData_.reserve(nRows_ * nCols_);
    for (const auto& rowValues : data) {
        if (rowValues.size() != nCols_)
            throw std::runtime_error("Inconsistent row dimensions in matrix specification.");
        std::copy(rowValues.begin(), rowValues.end(), std::back_inserter(allocatedData_));
    }
    data_ = allocatedData_.data();
}
/*!
 * \brief Matrix Constructs a new column vector, allocating memory and initializing values
 *        using the braced initializer.
 * \param data a braced initializer list containing the values to be stored in the
 *        resulting (nValues x 1) matrix.
 */
Matrix(std::initializer_list<Real> data) : allocatedData_(data), data_(allocatedData_.data()) {
    nRows_ = data.size();
    nCols_ = 1;
}
/*!
 * \brief Matrix Constructs a new matrix using already allocated memory.
 *        The matrix does NOT take ownership (allocatedData_ stays empty): the caller must
 *        keep the buffer alive for the lifetime of this object.
 * \param ptr the already-allocated memory underlying this matrix.
 * \param nRows the number of rows in the matrix.
 * \param nCols the number of columns in the matrix.
 */
Matrix(Real* ptr, size_t nRows, size_t nCols) : nRows_(nRows), nCols_(nCols), data_(ptr) {}
/*!
 * \brief cast make a copy of this matrix, with its elements cast as a different type.
 * \tparam NewReal the type to cast each element to.
 * \return the copy of the matrix with the new type.
 */
template <typename NewReal>
Matrix<NewReal> cast() const {
    Matrix<NewReal> converted(nRows_, nCols_);
    std::transform(cbegin(), cend(), converted[0],
                   [](const Real& value) { return static_cast<NewReal>(value); });
    return converted;
}
/*!
 * \brief setConstant sets all elements of this matrix to a specified value.
 * \param value the value to set each element to.
 */
void setConstant(Real value) { std::fill(begin(), end(), value); }
/*!
 * \brief setZero sets each element of this matrix to zero.
 */
void setZero() { setConstant(0); }
/*!
 * \brief isNearZero checks that each element in this matrix has an absolute value at or
 *        below some threshold.
 * \param threshold the value below which an element is considered zero.
 * \return whether all values are near zero or not.
 */
bool isNearZero(Real threshold = 1e-10f) const {
    return std::all_of(cbegin(), cend(), [&](const Real& entry) { return std::abs(entry) <= threshold; });
}
/*!
 * \brief inverse inverts this matrix, leaving the original matrix untouched.
 * \return the inverse of this matrix.
 * \throws std::runtime_error (via assertSquare) if the matrix is not square; the generic
 *         branch additionally requires symmetry (via applyOperation's assertSymmetric).
 * \warning no check is made for singularity: a zero determinant (3x3 branch) or a zero
 *          eigenvalue (generic branch) produces non-finite entries rather than an error.
 */
Matrix inverse() const {
    assertSquare();
    Matrix matrixInverse(nRows_, nRows_);
    if (nRows() == 3) {
        // 3x3 is a really common case, so treat it here as.
        // Explicit cofactor expansion: inverse = adjugate / determinant.
        Real determinant = data_[0] * (data_[4] * data_[8] - data_[7] * data_[5]) -
                           data_[1] * (data_[3] * data_[8] - data_[5] * data_[6]) +
                           data_[2] * (data_[3] * data_[7] - data_[4] * data_[6]);
        Real determinantInverse = 1 / determinant;
        matrixInverse.data_[0] = (data_[4] * data_[8] - data_[7] * data_[5]) * determinantInverse;
        matrixInverse.data_[1] = (data_[2] * data_[7] - data_[1] * data_[8]) * determinantInverse;
        matrixInverse.data_[2] = (data_[1] * data_[5] - data_[2] * data_[4]) * determinantInverse;
        matrixInverse.data_[3] = (data_[5] * data_[6] - data_[3] * data_[8]) * determinantInverse;
        matrixInverse.data_[4] = (data_[0] * data_[8] - data_[2] * data_[6]) * determinantInverse;
        matrixInverse.data_[5] = (data_[3] * data_[2] - data_[0] * data_[5]) * determinantInverse;
        matrixInverse.data_[6] = (data_[3] * data_[7] - data_[6] * data_[4]) * determinantInverse;
        matrixInverse.data_[7] = (data_[6] * data_[1] - data_[0] * data_[7]) * determinantInverse;
        matrixInverse.data_[8] = (data_[0] * data_[4] - data_[3] * data_[1]) * determinantInverse;
    } else {
        // Generic case; just use spectral decomposition, invert the eigenvalues, and stitch back together.
        // Note that this only works for symmetric matrices. Need to hook into Lapack for a general
        // inversion routine if this becomes a limitation.
        // (matrixInverse allocated above is unused on this path; the result is returned directly.)
        return this->applyOperation([](Real& element) { element = 1 / element; });
    }
    return matrixInverse;
}
/*!
* \brief assertSymmetric checks that this matrix is symmetric within some threshold.
* \param threshold the value below which an pair's difference is considered zero.
*/
void assertSymmetric(const Real& threshold = 1e-10f) const {
assertSquare();
for (int row = 0; row < nRows_; ++row) {
for (int col = 0; col < row; ++col) {
if (std::abs(data_[row * nCols_ + col] - data_[col * nCols_ + row]) > threshold)
throw std::runtime_error("Unexpected non-symmetric matrix found.");
}
}
}
/*!
 * \brief applyOperationToEachElement modifies every element in the matrix in place by
 *        applying an operation.
 * \param fxn a unary operator describing the operation to perform on each element.
 */
void applyOperationToEachElement(const std::function<void(Real&)>& fxn) { std::for_each(begin(), end(), fxn); }
/*!
 * \brief applyOperation applies a function of this matrix via the spectral decomposition,
 *        f(M) = V f(D) V^T, leaving the original untouched. Only for symmetric matrices,
 *        as coded (enforced by assertSymmetric below).
 * \param function a unary operator applied in place to each eigenvalue.
 * \return the matrix transformed by the operator.
 */
Matrix applyOperation(const std::function<void(Real&)>& function) const {
    assertSymmetric();
    auto eigenPairs = this->diagonalize();
    Matrix evalsReal = std::get<0>(eigenPairs);
    Matrix evecs = std::get<1>(eigenPairs);
    // Apply f to each eigenvalue...
    evalsReal.applyOperationToEachElement(function);
    Matrix evecsT = evecs.transpose();
    // ...scale row i of V^T by f(lambda_i), forming f(D) V^T ...
    for (int row = 0; row < nRows_; ++row) {
        Real transformedEigenvalue = evalsReal[row][0];
        std::for_each(evecsT.data_ + row * nCols_, evecsT.data_ + (row + 1) * nCols_,
                      [&](Real& val) { val *= transformedEigenvalue; });
    }
    // ...and stitch back together as V (f(D) V^T).
    return evecs * evecsT;
}
/*!
 * \brief assertSameSize make sure that this Matrix has the same dimensions as another Matrix.
 * \param other the matrix to compare to.
 * \throws std::runtime_error if either dimension differs.
 */
void assertSameSize(const Matrix& other) const {
    if (nRows_ != other.nRows_ || nCols_ != other.nCols_)
        // BUGFIX: corrected "Attepting" typo in the user-facing error message.
        throw std::runtime_error("Attempting to compare matrices of different sizes!");
}
/*!
 * \brief assertSquare make sure that this Matrix is square.
 * \throws std::runtime_error if the row and column counts differ.
 */
void assertSquare() const {
    if (nRows_ != nCols_)
        // BUGFIX: corrected "Attepting" typo in the user-facing error message.
        throw std::runtime_error("Attempting to perform a square matrix operation on a non-square matrix!");
}
/*!
 * \brief multiply this matrix with another, returning a new matrix containing the product.
 * \param other the right hand side of the matrix product.
 * \return the product of this matrix with the matrix other.
 * \throws std::runtime_error if the inner dimensions do not match.
 */
Matrix multiply(const Matrix& other) const {
    // TODO one fine day this should be replaced by GEMM calls, if matrix multiplies actually get used much.
    if (nCols_ != other.nRows_)
        throw std::runtime_error("Attempting to multiply matrices with incompatible dimensions.");
    Matrix product(nRows_, other.nCols_);  // zero-initialized, so we can accumulate into it
    // Row-by-row accumulation (i-k-j ordering); each product element still receives its
    // contributions in the same k order as a classic triple loop.
    for (int row = 0; row < nRows_; ++row) {
        const Real* lhsRow = data_ + row * nCols_;
        Real* productRow = product.data_ + row * other.nCols_;
        for (int link = 0; link < nCols_; ++link) {
            const Real lhsValue = lhsRow[link];
            const Real* rhsRow = other.data_ + link * other.nCols_;
            for (int col = 0; col < other.nCols_; ++col) {
                productRow[col] += lhsValue * rhsRow[col];
            }
        }
    }
    return product;
}
/*!
 * \brief operator * a convenient wrapper for the multiply function.
 * \param other the right hand side of the matrix product.
 * \return the product of this matrix with the matrix other.
 */
Matrix operator*(const Matrix& other) const { return this->multiply(other); }
/*!
 * \brief operator * scale a copy of this matrix by a constant, leaving the original untouched.
 * \param scaleFac the scale factor to apply.
 * \return the scaled version of this matrix.
 */
Matrix operator*(Real scaleFac) const {
    auto scaled = this->clone();
    scaled.applyOperationToEachElement([&](Real& element) { element *= scaleFac; });
    return scaled;
}
/*!
 * \brief incrementWith adds another matrix to this one element-wise, modifying this
 *        matrix in place.
 * \param other the right hand side of the matrix sum; must have the same dimensions.
 * \return a copy of this matrix after the addition.
 */
Matrix incrementWith(const Matrix& other) {
    assertSameSize(other);
    const Real* rhs = other.cbegin();
    for (Real& element : *this) element += *rhs++;
    return *this;
}
/*!
 * \brief a wrapper around the incrementWith() function.
 * \param other the right hand side of the matrix sum.
 * \return the sum of this matrix and the matrix other.
 * \note returns Matrix by value (not Matrix&), which is unusual for compound assignment;
 *       the returned object is produced by the implicitly generated copy constructor.
 */
Matrix operator+=(const Matrix& other) { return this->incrementWith(other); }
/*!
 * \brief increment every element of this matrix by a constant, modifying it in place.
 * \param shift the scalar added to each element.
 * \return the sum of this matrix and the constant shift.
 */
Matrix incrementWith(const Real& shift) {
    std::for_each(begin(), end(), [shift](Real& a) { a += shift; });
    return *this;
}
/*!
 * \brief a wrapper around the incrementWith() function.
 * \param shift the scalar to increment each value by.
 * \return the sum of this matrix and the constant shift.
 */
Matrix operator+=(const Real& shift) { return this->incrementWith(shift); }
/*!
 * \brief almostEquals checks that two matrices have all elements the same, within some specified tolerance.
 *        The two overloads below are selected by SFINAE: the first handles floating point
 *        element types, the second non-floating-point (complex) element types.
 * \param other the matrix against which we're comparing.
 * \param tolerance the amount that each element is allowed to deviate by.
 * \return whether the two matrices are almost equal.
 */
template <typename T = Real, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
bool almostEquals(const Matrix& other, Real tolerance = 1e-6) const {
    // The floating point version: |a - b| < tolerance, spelled out as a two-sided comparison.
    assertSameSize(other);
    return std::equal(cbegin(), cend(), other.cbegin(), [&tolerance](Real a, Real b) -> bool {
        return (((a - b) < std::real(tolerance)) && ((a - b) > -std::real(tolerance)));
    });
}
template <typename T = Real, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
bool almostEquals(const Matrix& other, Real tolerance = 1e-6) const {
    // The complex version: real and imaginary parts are compared separately.
    assertSameSize(other);
    auto tol = std::real(tolerance);
    // This is a little confusing, but the type "Real" is actually some kind of std::complex<...>.
    return std::equal(cbegin(), cend(), other.cbegin(), [&tol](Real a, Real b) -> bool {
        return (((a.real() - b.real()) < tol) && ((a.real() - b.real()) > -tol) && ((a.imag() - b.imag()) < tol) &&
                ((a.imag() - b.imag()) > -tol));
    });
}
/*!
 * \brief dot computes the inner product of this matrix with another, treating both as
 *        flat element sequences.
 * \param other the other matrix in the inner product, which must have the same dimensions.
 * \return the inner product.
 * \note no complex conjugation is applied, so for complex element types this is the
 *       unconjugated bilinear form, not the Hermitian inner product.
 */
Real dot(const Matrix& other) const {
    assertSameSize(other);
    return std::inner_product(cbegin(), cend(), other.cbegin(), Real(0));
}
/*!
 * \brief writeToFile formats the matrix and writes it to an ASCII file.
 * \param filename the name of the file to save to.
 * \param width the width of each matrix element's formatted representation.
 * \param precision the precision of each matrix element's formatted representation.
 * \param printDimensions whether to print the dimensions at the top of the file.
 */
void writeToFile(const std::string& filename, int width = 20, int precision = 14,
                 bool printDimensions = false) const {
    // The stream closes itself on destruction (RAII); no explicit close() is required.
    std::ofstream file(filename, std::ios::out);
    if (printDimensions) file << nRows_ << " " << nCols_ << std::endl;
    file << stringify(data_, nRows_ * nCols_, nCols_, width, precision);
}
/*!
 * \brief write formatted matrix to a stream object, one row per line, followed by a
 *        trailing newline/flush.
 * \param os stream object to write to.
 * \return modified stream object.
 */
std::ostream& write(std::ostream& os) const {
    for (int row = 0; row < nRows_; ++row) os << stringify(data_ + row * nCols_, nCols_, nCols_);
    os << std::endl;
    return os;
}
/*!
 * \brief transposeInPlace transposes this matrix in place.
 */
void transposeInPlace() {
    // Permute the row-major storage, then swap the logical dimensions to match.
    transposeMemoryInPlace(begin(), end(), nCols_);
    std::swap(nCols_, nRows_);
}
/*!
 * \brief clone make a new copy of this matrix by deep copying the data.
 *        Prefer this over the implicitly generated copy constructor, which copies the
 *        raw data_ pointer and therefore aliases the source's storage.
 * \return the copy of this matrix.
 */
Matrix clone() const {
    Matrix newMatrix = Matrix(nRows_, nCols_);
    std::copy(cbegin(), cend(), newMatrix.begin());
    return newMatrix;
}
/*!
 * \brief transpose this matrix, leaving the original untouched.
 * \return a transposed deep copy of this matrix.
 */
Matrix transpose() const {
    Matrix copy = this->clone();
    copy.transposeInPlace();
    return copy;
}
/*!
 * \brief diagonalize diagonalize this matrix, leaving the original untouched. Note that this assumes
 * that this matrix is real and symmetric (enforced by assertSymmetric).
 * \param order how to order the (eigenvalue,eigenvector) pairs, where the sort key is the eigenvalue.
 * \return a pair of corresponding <eigenvalue , eigenvectors> sorted according to the order variable.
 * The eigenvectors are stored by column.
 */
std::pair<Matrix<Real>, Matrix<Real>> diagonalize(SortOrder order = SortOrder::Ascending) const {
    assertSymmetric();
    Matrix eigenValues(nRows_, 1);
    Matrix unsortedEigenVectors(nRows_, nRows_);
    Matrix sortedEigenVectors(nRows_, nRows_);
    JacobiCyclicDiagonalization<Real>(eigenValues[0], unsortedEigenVectors[0], cbegin(), nRows_);
    // Transpose so each eigenvector occupies a contiguous row, making it easy to copy below
    // (assumes JacobiCyclicDiagonalization fills the vectors by column -- TODO confirm).
    unsortedEigenVectors.transposeInPlace();
    // Pair each eigenvalue with a pointer into unsortedEigenVectors; the pointers remain
    // valid because that matrix is not modified again before the copy loop.
    std::vector<std::pair<Real, const Real*>> eigenPairs;
    for (int val = 0; val < nRows_; ++val) eigenPairs.push_back({eigenValues[val][0], unsortedEigenVectors[val]});
    // std::pair sorts on its first member, i.e. ascending by eigenvalue.
    std::sort(eigenPairs.begin(), eigenPairs.end());
    if (order == SortOrder::Descending) std::reverse(eigenPairs.begin(), eigenPairs.end());
    for (int val = 0; val < nRows_; ++val) {
        const auto& e = eigenPairs[val];
        eigenValues.data_[val] = std::get<0>(e);
        std::copy(std::get<1>(e), std::get<1>(e) + nCols_, sortedEigenVectors[val]);
    }
    // Transpose back so the eigenvectors are stored by column, as documented.
    sortedEigenVectors.transposeInPlace();
    return {std::move(eigenValues), std::move(sortedEigenVectors)};
}
};
/*!
* A helper function to allow printing of Matrix objects to a stream.
*/
template <typename Real>
std::ostream& operator<<(std::ostream& os, Matrix<Real> const& m) {
return m.write(os);
}
} // Namespace helpme
#endif // Header guard
#include <vector>
namespace helpme {
/*!
 * \brief cartesianAddress maps the exponents (lx, ly, lz) of a Cartesian component onto
 *        its ordinal position within the shell of total angular momentum lx + ly + lz.
 */
static inline int cartesianAddress(int lx, int ly, int lz) {
    const int angularMomentum = lx + ly + lz;
    return lz * (2 * angularMomentum - lz + 3) / 2 + ly;
}
/*!
 * \brief makeCartesianRotationMatrix builds a rotation matrix for unique Cartesian
 * components with a given angular momentum. The algorithm used here is the simple
 * version (eq. 18) from D. M. Elking, J. Comp. Chem., 37 2067 (2016). It's definitely
 * not the fastest way to do it, but will be revisited if profiling shows it to be an issue.
 * \param angularMomentum the angular momentum of the rotation matrix desired.
 * \param transformer the matrix R to do the transform defined for a dipole as µ_new = R . µ_old.
 * \return the rotation matrix
 */
template <typename Real>
Matrix<Real> makeCartesianRotationMatrix(int angularMomentum, const Matrix<Real> &transformer) {
    // Cache the nine entries of the 3x3 transformation matrix.
    Real R00 = transformer[0][0];
    Real R01 = transformer[0][1];
    Real R02 = transformer[0][2];
    Real R10 = transformer[1][0];
    Real R11 = transformer[1][1];
    Real R12 = transformer[1][2];
    Real R20 = transformer[2][0];
    Real R21 = transformer[2][1];
    Real R22 = transformer[2][2];
    // (l+1)(l+2)/2 unique Cartesian components for angular momentum l.
    int nComponents = (angularMomentum + 1) * (angularMomentum + 2) / 2;
    // Precompute 0! .. (2l)!.  NOTE(review): stored as int, so this overflows for
    // 2*angularMomentum > 12 (i.e. angularMomentum >= 7) -- confirm the intended l range.
    auto factorial = std::vector<int>(2 * angularMomentum + 1);
    factorial[0] = 1;
    for (int l = 1; l <= 2 * angularMomentum; ++l) factorial[l] = l * factorial[l - 1];
    Matrix<Real> R(nComponents, nComponents);
    // Outer two loops enumerate the source component (nx, ny, nz); the six inner loops
    // partition nx, ny and nz into (p, q, r) contributions per eq. 18 of the reference.
    for (int nz = 0; nz <= angularMomentum; ++nz) {
        for (int ny = 0; ny <= angularMomentum - nz; ++ny) {
            int nx = angularMomentum - ny - nz;
            for (int pz = 0; pz <= nx; ++pz) {
                for (int py = 0; py <= nx - pz; ++py) {
                    int px = nx - py - pz;
                    for (int qz = 0; qz <= ny; ++qz) {
                        for (int qy = 0; qy <= ny - qz; ++qy) {
                            int qx = ny - qy - qz;
                            for (int rz = 0; rz <= nz; ++rz) {
                                for (int ry = 0; ry <= nz - rz; ++ry) {
                                    int rx = nz - ry - rz;
                                    int mx = px + qx + rx;
                                    int my = py + qy + ry;
                                    int mz = pz + qz + rz;
                                    int m = mx + my + mz;
                                    if (m == angularMomentum) {
                                        // Multinomial prefactors; the integer division is exact
                                        // because the denominator always divides the numerator.
                                        Real normx = factorial[mx] / (factorial[px] * factorial[qx] * factorial[rx]);
                                        Real normy = factorial[my] / (factorial[py] * factorial[qy] * factorial[ry]);
                                        Real normz = factorial[mz] / (factorial[pz] * factorial[qz] * factorial[rz]);
                                        Real Rx = std::pow(R00, px) * std::pow(R10, py) * std::pow(R20, pz);
                                        Real Ry = std::pow(R01, qx) * std::pow(R11, qy) * std::pow(R21, qz);
                                        Real Rz = std::pow(R02, rx) * std::pow(R12, ry) * std::pow(R22, rz);
                                        Real term = normx * normy * normz * Rx * Ry * Rz;
                                        R[cartesianAddress(mx, my, mz)][cartesianAddress(nx, ny, nz)] += term;
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    return R;
}
/*!
 * \brief matrixVectorProduct A naive implementation of matrix-vector products, avoiding BLAS requirements (for now).
 * \param transformer the transformation matrix; treated as square (the single dimension is
 *        taken from its row count).
 * \param inputVector the vector to be transformed; must hold at least nRows elements.
 * \param outputVector the transformed vector.  NOTE(review): must not alias inputVector,
 *        otherwise rows computed early would overwrite inputs needed by later rows.
 */
template <typename Real>
void matrixVectorProduct(const Matrix<Real> &transformer, const Real *inputVector, Real *outputVector) {
    int dimension = transformer.nRows();
    for (int row = 0; row < dimension; ++row) {
        outputVector[row] = std::inner_product(inputVector, inputVector + dimension, transformer[row], Real(0));
    }
}
/*!
 * \brief cartesianTransform transforms a list of cartesian quantities to a different basis.
 * Assumes a list of quantities are to be transformed and all angular momentum
 * components up to and including the specified maximum are present in ascending A.M. order.
 * The input is left untouched; a transformed copy is returned.
 * \param maxAngularMomentum the angular momentum of the incoming quantity.
 * \param transformOnlyThisShell if true, only the shell with the angular momentum specified will
 *        be transformed (and the data is assumed to contain just that shell, so offsets start at 0).
 * \param transformer the matrix R to do the transform defined for a dipole as µ_new = R . µ_old.
 * \param transformee the quantity to be transformed, stored as nAtoms X nComponents, with
 * components being the fast running index.
 */
template <typename Real>
Matrix<Real> cartesianTransform(int maxAngularMomentum, bool transformOnlyThisShell, const Matrix<Real> &transformer,
                                const Matrix<Real> &transformee) {
    Matrix<Real> transformed = transformee.clone();
    // When transforming a full multipole series, skip the l=0 (scalar) component at offset 0;
    // it is invariant under rotation.
    int offset = transformOnlyThisShell ? 0 : 1;
    int nAtoms = transformee.nRows();
    int firstShell = transformOnlyThisShell ? maxAngularMomentum : 1;
    for (int angularMomentum = firstShell; angularMomentum <= maxAngularMomentum; ++angularMomentum) {
        auto rotationMatrix = makeCartesianRotationMatrix(angularMomentum, transformer);
        for (int atom = 0; atom < nAtoms; ++atom) {
            const Real *inputData = transformee[atom];
            Real *outputData = transformed[atom];
            matrixVectorProduct(rotationMatrix, inputData + offset, outputData + offset);
        }
        // Advance past this shell's (l+1)(l+2)/2 Cartesian components.
        offset += (angularMomentum + 1) * (angularMomentum + 2) / 2;
    }
    return transformed;
}
} // Namespace helpme
#endif // Header guard
// original file: ../src/fftw_wrapper.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_FFTW_WRAPPER_H_
#define _HELPME_STANDALONE_FFTW_WRAPPER_H_
#include <complex>
#include <iostream>
#include <limits>
#include <stdexcept>
#include <type_traits>
#include <fftw3.h>
// #include "memory.h"
namespace helpme {
/*!
 * \brief The FFTWTypes class is a placeholder to lookup function names and types in FFTW parlance by template.
 *        This primary template is a table of harmless no-ops; real precision modes are
 *        provided by the explicit specializations that follow.
 */
template <typename Real>
struct FFTWTypes {
    // This is just a default implementation that does nothing - we just need to be able to instantiate something
    // in order to query the isImplemented member at runtime to check if the desired precision model was compiled in.
    struct EmptyPlan {
        int unused;
    };
    using Plan = void *;
    using Complex = std::complex<int>;
    static Plan makePlan4(size_t, void *, void *, int) { return 0; };
    static Plan makePlan5(size_t, void *, void *, int, int) { return 0; };
    static void cleanFFTW(){};
    static void execPlan1(Plan){};
    static void execPlan3(Plan, void *, void *){};
    static constexpr bool isImplemented = false;
    static constexpr decltype(&makePlan4) MakeRealToComplexPlan = &makePlan4;
    static constexpr decltype(&makePlan4) MakeComplexToRealPlan = &makePlan4;
    static constexpr decltype(&makePlan5) MakeComplexToComplexPlan = &makePlan5;
    static constexpr decltype(&execPlan3) ExecuteRealToComplexPlan = &execPlan3;
    static constexpr decltype(&execPlan3) ExecuteComplexToRealPlan = &execPlan3;
    static constexpr decltype(&execPlan3) ExecuteComplexToComplexPlan = &execPlan3;
    static constexpr decltype(&execPlan1) DestroyPlan = &execPlan1;
    // BUGFIX: point at the no-op cleanup function rather than nullptr; every other entry in
    // this table is a callable no-op, and invoking a null function pointer is undefined behavior.
    static constexpr decltype(&cleanFFTW) CleanupFFTW = &cleanFFTW;
};
#if HAVE_FFTWF == 1
/// Single precision (fftwf_*) lookup table; compiled in only when -DHAVE_FFTWF=1.
template <>
struct FFTWTypes<float> {
    using Plan = fftwf_plan;
    using Complex = fftwf_complex;
    static constexpr bool isImplemented = true;
    static constexpr decltype(&fftwf_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftwf_plan_dft_r2c_1d;
    static constexpr decltype(&fftwf_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftwf_plan_dft_c2r_1d;
    static constexpr decltype(&fftwf_plan_dft_1d) MakeComplexToComplexPlan = &fftwf_plan_dft_1d;
    static constexpr decltype(&fftwf_execute_dft_r2c) ExecuteRealToComplexPlan = &fftwf_execute_dft_r2c;
    static constexpr decltype(&fftwf_execute_dft_c2r) ExecuteComplexToRealPlan = &fftwf_execute_dft_c2r;
    static constexpr decltype(&fftwf_execute_dft) ExecuteComplexToComplexPlan = &fftwf_execute_dft;
    static constexpr decltype(&fftwf_destroy_plan) DestroyPlan = &fftwf_destroy_plan;
    static constexpr decltype(&fftwf_cleanup) CleanupFFTW = &fftwf_cleanup;
};
#endif  // HAVE_FFTWF
#if HAVE_FFTWD == 1
/// Double precision (fftw_*) lookup table; compiled in only when -DHAVE_FFTWD=1.
template <>
struct FFTWTypes<double> {
    using Plan = fftw_plan;
    using Complex = fftw_complex;
    static constexpr bool isImplemented = true;
    static constexpr decltype(&fftw_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftw_plan_dft_r2c_1d;
    static constexpr decltype(&fftw_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftw_plan_dft_c2r_1d;
    static constexpr decltype(&fftw_plan_dft_1d) MakeComplexToComplexPlan = &fftw_plan_dft_1d;
    static constexpr decltype(&fftw_execute_dft_r2c) ExecuteRealToComplexPlan = &fftw_execute_dft_r2c;
    static constexpr decltype(&fftw_execute_dft_c2r) ExecuteComplexToRealPlan = &fftw_execute_dft_c2r;
    static constexpr decltype(&fftw_execute_dft) ExecuteComplexToComplexPlan = &fftw_execute_dft;
    static constexpr decltype(&fftw_destroy_plan) DestroyPlan = &fftw_destroy_plan;
    static constexpr decltype(&fftw_cleanup) CleanupFFTW = &fftw_cleanup;
};
#endif  // HAVE_FFTWD
#if HAVE_FFTWL == 1
/// Long double precision (fftwl_*) lookup table; compiled in only when -DHAVE_FFTWL=1.
template <>
struct FFTWTypes<long double> {
    using Plan = fftwl_plan;
    using Complex = fftwl_complex;
    static constexpr bool isImplemented = true;
    static constexpr decltype(&fftwl_plan_dft_r2c_1d) MakeRealToComplexPlan = &fftwl_plan_dft_r2c_1d;
    static constexpr decltype(&fftwl_plan_dft_c2r_1d) MakeComplexToRealPlan = &fftwl_plan_dft_c2r_1d;
    static constexpr decltype(&fftwl_plan_dft_1d) MakeComplexToComplexPlan = &fftwl_plan_dft_1d;
    static constexpr decltype(&fftwl_execute_dft_r2c) ExecuteRealToComplexPlan = &fftwl_execute_dft_r2c;
    static constexpr decltype(&fftwl_execute_dft_c2r) ExecuteComplexToRealPlan = &fftwl_execute_dft_c2r;
    static constexpr decltype(&fftwl_execute_dft) ExecuteComplexToComplexPlan = &fftwl_execute_dft;
    static constexpr decltype(&fftwl_destroy_plan) DestroyPlan = &fftwl_destroy_plan;
    static constexpr decltype(&fftwl_cleanup) CleanupFFTW = &fftwl_cleanup;
};
#endif  // HAVE_FFTWL
/*!
 * \brief The FFTWWrapper class is a convenient wrapper to abstract away the details of different
 * precision modes for FFTW, where the types and function names differ.
 */
template <typename Real>
class FFTWWrapper {
    using typeinfo = FFTWTypes<Real>;
    using Plan = typename typeinfo::Plan;
    using Complex = typename typeinfo::Complex;

   protected:
    /// An FFTW plan object, describing out of place complex to complex forward transforms.
    typename typeinfo::Plan forwardPlan_ = nullptr;
    /// An FFTW plan object, describing out of place complex to complex inverse transforms.
    typename typeinfo::Plan inversePlan_ = nullptr;
    /// An FFTW plan object, describing in place complex to complex forward transforms.
    typename typeinfo::Plan forwardInPlacePlan_ = nullptr;
    /// An FFTW plan object, describing in place complex to complex inverse transforms.
    typename typeinfo::Plan inverseInPlacePlan_ = nullptr;
    /// An FFTW plan object, describing out of place real to complex forward transforms.
    typename typeinfo::Plan realToComplexPlan_ = nullptr;
    /// An FFTW plan object, describing out of place complex to real inverse transforms.
    typename typeinfo::Plan complexToRealPlan_ = nullptr;
    /// The size of the real data.  BUGFIX: given an in-class initializer so a
    /// default-constructed wrapper holds a defined value; previously the swaps in the
    /// move-assignment operator read indeterminate memory for a default-constructed target.
    size_t fftDimension_ = 0;
    /// The flags to be passed to the FFTW plan creator, to determine startup cost
    /// (initialized in-class for the same reason as fftDimension_).
    unsigned transformFlags_ = FFTW_ESTIMATE;

   public:
    /// Default constructor: no plans are created; only move-assignment can make this usable.
    FFTWWrapper() {}
    /*!
     * \brief Constructs a wrapper and builds all six 1D plans for the given transform length.
     * \param fftDimension the length of the real-space grid to be transformed.
     * \throws std::runtime_error if FFTW support for this precision mode was not compiled in.
     */
    FFTWWrapper(size_t fftDimension) : fftDimension_(fftDimension), transformFlags_(FFTW_ESTIMATE) {
        if (!typeinfo::isImplemented) {
            // BUGFIX: added the missing space after "flags" -- the adjacent string literals
            // previously concatenated to "...compiler flagsfor single...".
            throw std::runtime_error(
                "Attempting to call FFTW using a precision mode that has not been linked. "
                "Make sure that -DHAVE_FFTWF=1, -DHAVE_FFTWD=1 or -DHAVE_FFTWL=1 is added to the compiler flags "
                "for single, double and long double precision support, respectively.");
        }
        // Scratch buffers used only while planning; the transform() methods later execute the
        // plans against caller-provided buffers via the fftw_execute_dft* interface.
        helpme::vector<Real> realTemp(fftDimension_);
        helpme::vector<std::complex<Real>> complexTemp1(fftDimension_);
        helpme::vector<std::complex<Real>> complexTemp2(fftDimension_);
        Real *realPtr = realTemp.data();
        Complex *complexPtr1 = reinterpret_cast<Complex *>(complexTemp1.data());
        Complex *complexPtr2 = reinterpret_cast<Complex *>(complexTemp2.data());
        forwardPlan_ =
            typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr2, FFTW_FORWARD, transformFlags_);
        inversePlan_ =
            typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr2, FFTW_BACKWARD, transformFlags_);
        forwardInPlacePlan_ =
            typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr1, FFTW_FORWARD, transformFlags_);
        inverseInPlacePlan_ =
            typeinfo::MakeComplexToComplexPlan(fftDimension_, complexPtr1, complexPtr1, FFTW_BACKWARD, transformFlags_);
        realToComplexPlan_ = typeinfo::MakeRealToComplexPlan(fftDimension_, realPtr, complexPtr1, transformFlags_);
        complexToRealPlan_ = typeinfo::MakeComplexToRealPlan(fftDimension_, complexPtr1, realPtr, transformFlags_);
    }
    // Plans are uniquely owned: copying is forbidden, ownership moves via swap below.
    FFTWWrapper(const FFTWWrapper &other) = delete;
    FFTWWrapper(FFTWWrapper &&other) = delete;
    FFTWWrapper &operator=(const FFTWWrapper &other) = delete;
    /// Swap-based move assignment; the donor object ends up owning this object's old plans
    /// and releases them when it is destroyed.
    FFTWWrapper &operator=(FFTWWrapper &&other) {
        std::swap(forwardPlan_, other.forwardPlan_);
        std::swap(forwardInPlacePlan_, other.forwardInPlacePlan_);
        std::swap(inversePlan_, other.inversePlan_);
        std::swap(inverseInPlacePlan_, other.inverseInPlacePlan_);
        std::swap(realToComplexPlan_, other.realToComplexPlan_);
        std::swap(complexToRealPlan_, other.complexToRealPlan_);
        std::swap(fftDimension_, other.fftDimension_);
        std::swap(transformFlags_, other.transformFlags_);
        return *this;
    }
    /// Release every plan that was actually created (null plans are skipped).
    ~FFTWWrapper() {
        if (forwardPlan_) typeinfo::DestroyPlan(forwardPlan_);
        if (inversePlan_) typeinfo::DestroyPlan(inversePlan_);
        if (forwardInPlacePlan_) typeinfo::DestroyPlan(forwardInPlacePlan_);
        if (inverseInPlacePlan_) typeinfo::DestroyPlan(inverseInPlacePlan_);
        if (realToComplexPlan_) typeinfo::DestroyPlan(realToComplexPlan_);
        if (complexToRealPlan_) typeinfo::DestroyPlan(complexToRealPlan_);
    }
    /*!
     * \brief transform call FFTW to do an out of place complex to real FFT.
     * \param inBuffer the location of the input data.
     * \param outBuffer the location of the output data.
     */
    void transform(std::complex<Real> *inBuffer, Real *outBuffer) {
        typeinfo::ExecuteComplexToRealPlan(complexToRealPlan_, reinterpret_cast<Complex *>(inBuffer), outBuffer);
    }
    /*!
     * \brief transform call FFTW to do an out of place real to complex FFT.
     * \param inBuffer the location of the input data.
     * \param outBuffer the location of the output data.
     */
    void transform(Real *inBuffer, std::complex<Real> *outBuffer) {
        typeinfo::ExecuteRealToComplexPlan(realToComplexPlan_, inBuffer, reinterpret_cast<Complex *>(outBuffer));
    }
    /*!
     * \brief transform call FFTW to do an in place complex to complex FFT.
     * \param inPlaceBuffer the location of the input and output data.
     * \param direction either FFTW_FORWARD or FFTW_BACKWARD.
     * \throws std::runtime_error if direction is neither of the two FFTW constants.
     */
    void transform(std::complex<Real> *inPlaceBuffer, int direction) {
        Complex *inPlacePtr = reinterpret_cast<Complex *>(inPlaceBuffer);
        switch (direction) {
            case FFTW_FORWARD:
                typeinfo::ExecuteComplexToComplexPlan(forwardInPlacePlan_, inPlacePtr, inPlacePtr);
                break;
            case FFTW_BACKWARD:
                typeinfo::ExecuteComplexToComplexPlan(inverseInPlacePlan_, inPlacePtr, inPlacePtr);
                break;
            default:
                throw std::runtime_error("Invalid FFTW transform passed to in place transform().");
        }
    }
    /*!
     * \brief transform call FFTW to do an out of place complex to complex FFT.
     * \param inBuffer the location of the input data.
     * \param outBuffer the location of the output data.
     * \param direction either FFTW_FORWARD or FFTW_BACKWARD.
     * \throws std::runtime_error if direction is neither of the two FFTW constants.
     */
    void transform(std::complex<Real> *inBuffer, std::complex<Real> *outBuffer, int direction) {
        Complex *inPtr = reinterpret_cast<Complex *>(inBuffer);
        Complex *outPtr = reinterpret_cast<Complex *>(outBuffer);
        switch (direction) {
            case FFTW_FORWARD:
                typeinfo::ExecuteComplexToComplexPlan(forwardPlan_, inPtr, outPtr);
                break;
            case FFTW_BACKWARD:
                typeinfo::ExecuteComplexToComplexPlan(inversePlan_, inPtr, outPtr);
                break;
            default:
                throw std::runtime_error("Invalid FFTW transform passed to transform().");
        }
    }
};
} // Namespace helpme
#endif // Header guard
// original file: ../src/gamma.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_GAMMA_H_
#define _HELPME_STANDALONE_GAMMA_H_
#include <cmath>
#include <limits>
/*!
* \file gamma.h
* \brief Contains C++ implementations of templated gamma and incomplete gamma functions, computed using recursion.
*/
namespace helpme {
#define HELPME_SQRTTWO std::sqrt(static_cast<Real>(2))
#define HELPME_SQRTPI static_cast<Real>(1.77245385090551602729816748334114518279754945612238712821381L)
#define HELPME_PI static_cast<Real>(3.14159265358979323846264338327950288419716939937510582097494L)
/*!
 * Compute upper incomplete gamma functions for positive half-integral s values using the recursion
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \Gamma[\frac{\mathrm{twoS}-2}{2},x] + x^{\frac{\mathrm{twoS}-2}{2}}e^{-x}\f$
 */
template <typename Real, int twoS, bool isPositive>
struct incompleteGammaRecursion {
    /// Peel off one recursion step; terminates at the twoS == 1 or twoS == 2 specializations.
    static Real compute(Real x) {
        // Use std::-qualified math calls: <cmath> is only guaranteed to declare pow/exp in
        // namespace std, so the unqualified names relied on implementation-defined
        // global-namespace C library declarations.
        return (0.5f * twoS - 1) * incompleteGammaRecursion<Real, twoS - 2, isPositive>::compute(x) +
               std::pow(x, (0.5f * twoS - 1)) * std::exp(-x);
    }
};
/*!
 * Compute upper incomplete gamma functions for negative half-integral s values using the recursion
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2},x] -
 * 2x^\frac{\mathrm{twoS}}{2}e^{-x}}{\mathrm{twoS}}\f$
 */
template <typename Real, int twoS>
struct incompleteGammaRecursion<Real, twoS, false> {
    /// Climb towards the twoS == 0 or twoS == 1 base cases; each step divides by twoS/2.
    static Real compute(Real x) {
        // std::-qualified math calls avoid relying on implementation-defined global-namespace
        // C library declarations of pow/exp (see <cmath>).
        return (incompleteGammaRecursion<Real, twoS + 2, false>::compute(x) - std::pow(x, 0.5f * twoS) * std::exp(-x)) /
               (0.5f * twoS);
    }
};
/// Specific value of incomplete gamma function: \f$\Gamma[1,x] = e^{-x}\f$.
template <typename Real>
struct incompleteGammaRecursion<Real, 2, true> {
    // std::exp rather than the unqualified C library call, for standard-conformant lookup.
    static Real compute(Real x) { return std::exp(-x); }
};
/// Specific value of incomplete gamma function: \f$\Gamma[\frac{1}{2},x] = \sqrt{\pi}\,\mathrm{erfc}(\sqrt{x})\f$.
template <typename Real>
struct incompleteGammaRecursion<Real, 1, false> {
    // std::erfc rather than the unqualified C library call, consistent with std::sqrt here.
    static Real compute(Real x) { return HELPME_SQRTPI * std::erfc(std::sqrt(x)); }
};
/// Specific value of incomplete gamma function: \f$\Gamma[\frac{1}{2},x] = \sqrt{\pi}\,\mathrm{erfc}(\sqrt{x})\f$.
template <typename Real>
struct incompleteGammaRecursion<Real, 1, true> {
    // std::erfc rather than the unqualified C library call, consistent with std::sqrt here.
    static Real compute(Real x) { return HELPME_SQRTPI * std::erfc(std::sqrt(x)); }
};
/// Specific value of incomplete gamma function.
/// \f$\Gamma[0,x] = -\mathrm{Ei}(-x)\f$, evaluated via series / continued fraction expansions of
/// the exponential integral Ei.
template <typename Real>
struct incompleteGammaRecursion<Real, 0, false> {
    static Real compute(Real x) {
        // Gamma(0,x) is (minus) the exponential integral of -x. This implementation was stolen from
        // http://www.mymathlib.com/c_source/functions/exponential_integrals/exponential_integral_Ei.c
        // Each helper below covers the argument range in which its expansion converges quickly.
        x = -x;
        if (x < -5.0L) return -(Real)Continued_Fraction_Ei(x);
        if (x == 0.0L) return std::numeric_limits<Real>::max();
        if (x < 6.8L) return -(Real)Power_Series_Ei(x);
        if (x < 50.0L) return -(Real)Argument_Addition_Series_Ei(x);
        return -(Real)Continued_Fraction_Ei(x);
    }
   private:
    /// Convergence tolerance for the iterative expansions below.
    static constexpr long double epsilon = 10.0 * std::numeric_limits<long double>::epsilon();
    /*!
     * \brief Continued_Fraction_Ei evaluates the exponential integral Ei(x) for x < -5 or x > 50,
     *        where the continued fraction representation
     *        Ei(x) = -exp(x) { 1/(-x+1-) 1/(-x+3-) 4/(-x+5-) 9/(-x+7-) ... }
     *        converges fairly rapidly.
     * \param x the argument of the exponential integral Ei().
     * \return the value of the exponential integral Ei evaluated at x.
     */
    static long double Continued_Fraction_Ei(long double x) {
        // Evaluate the continued fraction using the standard three-term recurrences for the
        // numerator (A*) and denominator (B*) convergents, rescaling by Bp1 when it grows past
        // one so the recurrence does not overflow.
        long double Am1 = 1.0L;
        long double A0 = 0.0L;
        long double Bm1 = 0.0L;
        long double B0 = 1.0L;
        long double a = std::exp(x);
        long double b = -x + 1.0L;
        long double Ap1 = b * A0 + a * Am1;
        long double Bp1 = b * B0 + a * Bm1;
        int j = 1;
        a = 1.0L;
        // Iterate until successive convergents A/B agree to within epsilon (relative).
        while (std::fabs(Ap1 * B0 - A0 * Bp1) > epsilon * std::fabs(A0 * Bp1)) {
            if (std::fabs(Bp1) > 1.0L) {
                Am1 = A0 / Bp1;
                A0 = Ap1 / Bp1;
                Bm1 = B0 / Bp1;
                B0 = 1.0L;
            } else {
                Am1 = A0;
                A0 = Ap1;
                Bm1 = B0;
                B0 = Bp1;
            }
            a = -j * j;  // Partial numerators are -1, -4, -9, ...
            b += 2.0L;   // Partial denominators are -x+1, -x+3, -x+5, ...
            Ap1 = b * A0 + a * Am1;
            Bp1 = b * B0 + a * Bm1;
            j += 1;
        }
        return (-Ap1 / Bp1);
    }
    /*!
     * \brief Power_Series_Ei evaluates Ei(x) for -5 < x < 6.8 using the power series expansion
     *        (Ei(x) - gamma - ln|x|)/exp(x) = - Sum (1 + 1/2 + ... + 1/j) (-x)^j / j!  (j = 1..inf),
     *        where gamma is Euler's gamma constant. Note that for x = 0.0, Ei is -inf; that case
     *        is handled by the caller, which returns the largest representable value instead.
     * \param x the argument of the exponential integral Ei().
     * \return the value of the exponential integral Ei evaluated at x.
     */
    static long double Power_Series_Ei(long double x) {
        long double xn = -x;
        long double Sn = -x;
        long double Sm1 = 0.0L;
        long double hsum = 1.0L;                       // Harmonic partial sum 1 + 1/2 + ... + 1/j.
        long double g = 0.5772156649015328606065121L;  // Euler's gamma constant.
        long double y = 1.0L;
        long double factorial = 1.0L;
        // Accumulate terms until successive partial sums agree to within epsilon (relative).
        while (std::fabs(Sn - Sm1) > epsilon * std::fabs(Sm1)) {
            Sm1 = Sn;
            y += 1.0L;
            xn *= (-x);
            factorial *= y;
            hsum += (1.0 / y);
            Sn += hsum * xn / factorial;
        }
        return (g + std::log(std::fabs(x)) - std::exp(x) * Sn);
    }
    /*!
     * \brief Argument_Addition_Series_Ei evaluates Ei(x) for 6.8 < x < 50.0 using the argument
     *        addition series
     *        Ei(x+dx) = Ei(x) + exp(x) Sum j! [exp(j) expj(-dx) - 1] / x^(j+1)  (j = 0..inf),
     *        where |x| > |dx| and expj(y) = Sum y^k / k!  (k = 0..j), anchored at tabulated
     *        values of Ei at the integers 7 through 50.
     * \param x the argument of the exponential integral Ei().
     * \return the value of the exponential integral Ei evaluated at x.
     */
    static long double Argument_Addition_Series_Ei(long double x) {
        // ei[i] appears to hold Ei(i + 7) for i = 0..43 (ei[0] = 191.504... matches Ei(7));
        // NOTE(review): confirm against the mymathlib table before relying on this.
        static long double ei[] = {
            1.915047433355013959531e2L,  4.403798995348382689974e2L,  1.037878290717089587658e3L,
            2.492228976241877759138e3L,  6.071406374098611507965e3L,  1.495953266639752885229e4L,
            3.719768849068903560439e4L,  9.319251363396537129882e4L,  2.349558524907683035782e5L,
            5.955609986708370018502e5L,  1.516637894042516884433e6L,  3.877904330597443502996e6L,
            9.950907251046844760026e6L,  2.561565266405658882048e7L,  6.612718635548492136250e7L,
            1.711446713003636684975e8L,  4.439663698302712208698e8L,  1.154115391849182948287e9L,
            3.005950906525548689841e9L,  7.842940991898186370453e9L,  2.049649711988081236484e10L,
            5.364511859231469415605e10L, 1.405991957584069047340e11L, 3.689732094072741970640e11L,
            9.694555759683939661662e11L, 2.550043566357786926147e12L, 6.714640184076497558707e12L,
            1.769803724411626854310e13L, 4.669055014466159544500e13L, 1.232852079912097685431e14L,
            3.257988998672263996790e14L, 8.616388199965786544948e14L, 2.280446200301902595341e15L,
            6.039718263611241578359e15L, 1.600664914324504111070e16L, 4.244796092136850759368e16L,
            1.126348290166966760275e17L, 2.990444718632336675058e17L, 7.943916035704453771510e17L,
            2.111342388647824195000e18L, 5.614329680810343111535e18L, 1.493630213112993142255e19L,
            3.975442747903744836007e19L, 1.058563689713169096306e20L};
        int k = (int)(x + 0.5f);  // Nearest integer anchor; 6.8 < x < 50 keeps 7 <= k <= 50.
        int j = 0;
        long double xx = (long double)k;
        long double dx = x - xx;
        long double xxj = xx;
        long double edx = std::exp(dx);
        long double Sm = 1.0L;  // Running expj(-dx) * exp(dx) accumulator.
        long double Sn = (edx - 1.0L) / xxj;
        long double term = std::numeric_limits<double>::max();
        long double factorial = 1.0L;
        long double dxj = 1.0L;
        // Add correction terms until they fall below the relative tolerance.
        while (std::fabs(term) > epsilon * std::fabs(Sn)) {
            j++;
            factorial *= (long double)j;
            xxj *= xx;
            dxj *= (-dx);
            Sm += (dxj / factorial);
            term = (factorial * (edx * Sm - 1.0L)) / xxj;
            Sn += term;
        }
        return ei[k - 7] + Sn * std::exp(xx);
    }
};
/*!
 * Compute gamma function for positive half-integral s values using the recursion.
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2}] = \Gamma[\frac{\mathrm{twoS}-2}{2}]\frac{\mathrm{twoS}-2}{2} \f$
 */
template <typename Real, int twoS, bool isPositive>
struct gammaRecursion {
    /// Peel one factor of (twoS/2 - 1) off and recurse; terminates at the twoS == 1 or 2 specializations.
    static constexpr Real value = (0.5f * twoS - 1) * gammaRecursion<Real, twoS - 2, isPositive>::value;
};
/*!
* Compute gamma function for negative half-integral s values using the recursion.
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2}] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2}]}{\mathrm{twoS}} \f$
* Returns infinity (expressed as the largest value representable by Real) for \f$twoS = 0, -2, -4, -6, \ldots\f$ .
*/
template <typename Real, int twoS>
struct gammaRecursion<Real, twoS, false> {
    /// Divide the value one step up by twoS/2, unless that value is the "infinity" sentinel
    /// (numeric_limits::max, produced at the poles), which must be propagated unchanged.
    static constexpr Real value = gammaRecursion<Real, twoS + 2, false>::value != std::numeric_limits<Real>::max()
                                      ? gammaRecursion<Real, twoS + 2, false>::value / (0.5f * twoS)
                                      : std::numeric_limits<Real>::max();
};
/// Specific value of the Gamma function: the pole at zero, signaled by the largest representable value.
template <typename Real>
struct gammaRecursion<Real, 0, false> {
    static constexpr Real value = std::numeric_limits<Real>::max();
};
/// Specific value of the Gamma function: \f$\Gamma[\frac{1}{2}] = \sqrt{\pi}\f$.
template <typename Real>
struct gammaRecursion<Real, 1, true> {
    static constexpr Real value = HELPME_SQRTPI;
};
/// Specific value of the Gamma function: \f$\Gamma[\frac{1}{2}] = \sqrt{\pi}\f$, reached when the
/// upward recursion for negative twoS terminates at twoS == 1.
template <typename Real>
struct gammaRecursion<Real, 1, false> {
    static constexpr Real value = HELPME_SQRTPI;
};
/// Specific value of the Gamma function: \f$\Gamma[1] = 1\f$.
template <typename Real>
struct gammaRecursion<Real, 2, true> {
    static constexpr Real value = 1;
};
/// Specific value of the Gamma function: \f$\Gamma[1] = 1\f$, reached when the upward recursion
/// for negative twoS terminates at twoS == 2.
template <typename Real>
struct gammaRecursion<Real, 2, false> {
    static constexpr Real value = 1;
};
/*!
 * \class incompleteGammaComputer
 * \brief Computes the upper incomplete Gamma function.
 * \f$ \Gamma[s,x] = \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t \f$
 * In this code we only need half integral arguments for \f$s\f$, and only positive \f$x\f$ arguments.
 * \tparam Real the floating point type to use for arithmetic.
 * \tparam twoS twice the s value required.
 */
template <typename Real, int twoS>
struct incompleteGammaComputer {
    /*!
     * \brief Computes the incomplete gamma function.
     * \param x value required.
     * \return \f$\Gamma[\frac{\mathrm{twoS}}{2}, x]\f$.
     */
    // Dispatch on the sign of twoS: twoS == 0 uses the "negative" recursion, which has a
    // dedicated specialization based on the exponential integral.
    static Real compute(Real x) { return incompleteGammaRecursion<Real, twoS, (twoS > 0)>::compute(x); }
};
/*!
 * Compute upper incomplete gamma functions for positive half-integral s values using the recursion
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \Gamma[\frac{\mathrm{twoS}-2}{2},x] + x^{\frac{\mathrm{twoS}-2}{2}}e^{-x}\f$
 */
template <typename Real, int twoS, bool isPositive>
struct incompleteVirialGammaRecursion {
    /// Returns the pair {Gamma(twoS/2, x), Gamma(twoS/2 + 1, x)}; the second value reuses the
    /// first via one step of the upward recursion, costing only an extra pow/exp evaluation.
    static std::pair<Real, Real> compute(Real x) {
        Real gamma = incompleteGammaComputer<Real, twoS>::compute(x);
        // std::-qualified math calls avoid relying on implementation-defined global-namespace
        // C library declarations of pow/exp (see <cmath>).
        return {gamma, (0.5f * twoS) * gamma + std::pow(x, (0.5f * twoS)) * std::exp(-x)};
    }
};
/*!
 * Compute upper incomplete gamma functions for negative half-integral s values using the recursion
 * \f$ \Gamma[\frac{\mathrm{twoS}}{2},x] = \frac{2\Gamma[\frac{\mathrm{twoS}+2}{2},x] -
 * 2x^\frac{\mathrm{twoS}}{2}e^{-x}}{\mathrm{twoS}}\f$
 */
template <typename Real, int twoS>
struct incompleteVirialGammaRecursion<Real, twoS, false> {
    /// Returns the pair {Gamma(twoS/2, x), Gamma(twoS/2 + 1, x)}; the first value is derived
    /// from the second via one step of the downward recursion.
    static std::pair<Real, Real> compute(Real x) {
        Real gamma = incompleteGammaComputer<Real, twoS + 2>::compute(x);
        // std::-qualified math calls avoid relying on implementation-defined global-namespace
        // C library declarations of pow/exp (see <cmath>).
        return {(gamma - std::pow(x, 0.5f * twoS) * std::exp(-x)) / (0.5f * twoS), gamma};
    }
};
/*!
 * \class incompleteGammaVirialComputer
 * \brief Computes the upper incomplete Gamma function for two different values: s and s+1.
 * \f$ \Gamma[s,x] = \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t \f$
 * In this code we only need half integral arguments for \f$s\f$, and only positive \f$x\f$ arguments.
 * \tparam Real the floating point type to use for arithmetic.
 * \tparam twoS twice the s value required.
 */
template <typename Real, int twoS>
struct incompleteGammaVirialComputer {
    /*!
     * \brief Computes the incomplete gamma function for argument twoS and twoS+2.
     * \param x value required.
     * \return \f$\Gamma[\frac{\mathrm{twoS}}{2}, x]\f$ and \f$\Gamma[\frac{\mathrm{twoS+2}}{2}, x]\f$.
     */
    static std::pair<Real, Real> compute(Real x) {
        // N.B. twoS == 0 must dispatch to the "positive" recursion here (unlike
        // incompleteGammaComputer): the negative-s recursion divides by twoS/2, which is zero.
        return incompleteVirialGammaRecursion<Real, twoS, (twoS >= 0)>::compute(x);
    }
};
/*!
 * \class gammaComputer
 * \brief Computes the Gamma function.
 * \f$ \Gamma[s] = \int_0^\infty t^{s-1} e^{-t} \mathrm{d}t \f$
 * In this code we only need half integral values for the \f$s\f$ argument, so the input
 * argument \f$s\f$ will yield \f$\Gamma[\frac{s}{2}]\f$.
 * \tparam Real the floating point type to use for arithmetic.
 * \tparam twoS twice the s value required.
 */
template <typename Real, int twoS>
struct gammaComputer {
    /// The value of \f$\Gamma[\frac{\mathrm{twos}}{2}]\f$, evaluated entirely at compile time.
    static constexpr Real value = gammaRecursion<Real, twoS, (twoS > 0)>::value;
};
/*!
 * \brief Computes the Gamma function using recursion instead of template metaprogramming.
 * \f$ \Gamma[s] = \int_0^\infty t^{s-1} e^{-t} \mathrm{d}t \f$
 * In this code we only need half integral values for the \f$s\f$ argument, so the input
 * argument \f$s\f$ will yield \f$\Gamma[\frac{s}{2}]\f$.
 * \tparam Real the floating point type to use for arithmetic.
 * \param twoS twice the s value required.
 */
template <typename Real>
Real nonTemplateGammaComputer(int twoS) {
    // Base cases of the half-integral recursion: Gamma(1) and Gamma(1/2).
    if (twoS == 2) return 1;
    if (twoS == 1) return HELPME_SQRTPI;
    // Gamma has poles at zero and the negative integers; signal with the largest representable value.
    if (twoS <= 0 && twoS % 2 == 0) return std::numeric_limits<Real>::max();
    // Recurse towards the base cases: downwards for positive arguments, upwards for negative ones.
    return twoS > 0 ? nonTemplateGammaComputer<Real>(twoS - 2) * (0.5f * twoS - 1)
                    : nonTemplateGammaComputer<Real>(twoS + 2) / (0.5f * twoS);
}
} // Namespace helpme
#endif // Header guard
// original file: ../src/gridsize.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_GRIDSIZE_H_
#define _HELPME_STANDALONE_GRIDSIZE_H_
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <vector>
namespace helpme {
// N.B. The templates here are just to avoid multiple definitions in the .so file.
/*!
 * \brief allDivisors checks that a list of values are divisors of a given input value.
 * \param gridSize the gridSize to check for divisors.
 * \param requiredDivisors the list of divisors.
 * \return whether all listed values are divisors of gridSize.
 */
template <typename T>
bool allDivisors(T gridSize, const std::initializer_list<T> &requiredDivisors) {
    for (const T &divisor : requiredDivisors)
        if (gridSize % divisor) return false;
    return true;
}
/*!
 * \brief findGridSize FFTW likes to have transformations with dimensions of the form
 *        2^a 3^b 5^c 7^d 11^e 13^f
 *        where a, b, c and d are general and e+f is either 0 or 1. MKL has similar demands:
 *
 * https://software.intel.com/en-us/articles/fft-length-and-layout-advisor/
 * http://www.fftw.org/fftw3_doc/Real_002ddata-DFTs.html
 *
 * This routine will compute the next largest grid size subject to the constraint that the
 * resulting size is a multiple of a given factor.
 * \param inputSize the minimum size of the grid (assumed non-negative).
 * \param requiredDivisors list of values that must be a factor of the output grid size.
 * \return the adjusted grid size.
 */
template <typename T>
int findGridSize(T inputSize, const std::initializer_list<T> &requiredDivisors) {
    std::vector<int> primeFactors{2, 3, 5, 7};
    T minDivisor = std::min(requiredDivisors);
    // Round inputSize up to the next multiple of minDivisor using exact integer arithmetic.
    // The previous implementation went through float's ceil(), whose 24 bit mantissa can round
    // large sizes to the wrong multiple.
    T currentSize = ((inputSize + minDivisor - 1) / minDivisor) * minDivisor;
    while (true) {
        // Now we know that the grid size is a multiple of requiredFactor, check
        // that it satisfies the prime factor requirements stated above.
        T remainder = currentSize;
        for (const int &factor : primeFactors)
            while (remainder > 1 && remainder % factor == 0) remainder /= factor;
        if ((remainder == 1 || remainder == 11 || remainder == 13) && allDivisors(currentSize, requiredDivisors))
            return currentSize;
        currentSize += minDivisor;
    }
}
} // Namespace helpme
#endif // Header guard
// #include "matrix.h"
#if HAVE_MKL == 1
#include "mkl.h"
#endif
// #include "memory.h"
#if HAVE_MPI == 1
// original file: ../src/mpi_wrapper.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_MPI_WRAPPER_H_
#define _HELPME_STANDALONE_MPI_WRAPPER_H_
#include <mpi.h>
#include <complex>
#include <iomanip>
#include <iostream>
#include <stdexcept>
namespace helpme {
/*!
 * \brief The MPITypes struct abstracts away the MPI_Datatype types for different floating point modes
 * using templates to hide the details from the caller.
 */
template <typename Real>
struct MPITypes {
    /// The MPI datatype descriptor matching Real.
    MPI_Datatype realType_;
    /// The MPI datatype descriptor matching std::complex<Real>.
    MPI_Datatype complexType_;
    // The primary template is only instantiated for unsupported floating point types; the
    // float / double / long double specializations below supply the real implementations.
    MPITypes() {
        throw std::runtime_error("MPI wrapper has not been implemented for the requested floating point type.");
    }
};
// These explicit specializations of a member function are not implicitly inline, so they must
// be marked inline explicitly: this header is included from multiple translation units, and
// non-inline definitions would violate the ODR (multiple-definition errors at link time).
template <>
inline MPITypes<float>::MPITypes() : realType_(MPI_FLOAT), complexType_(MPI_C_COMPLEX) {}
template <>
inline MPITypes<double>::MPITypes() : realType_(MPI_DOUBLE), complexType_(MPI_C_DOUBLE_COMPLEX) {}
template <>
inline MPITypes<long double>::MPITypes() : realType_(MPI_LONG_DOUBLE), complexType_(MPI_C_LONG_DOUBLE_COMPLEX) {}
/*!
 * \brief The MPIWrapper struct is a lightweight C++ wrapper around the C MPI functions. Its main
 * purpose is to provide RAII semantics, ensuring that memory is correctly freed. It also
 * conveniently abstracts away the different MPI type descriptors for each floating point type.
 */
template <typename Real>
struct MPIWrapper {
    MPITypes<Real> types_;
    /// The MPI communicator instance to use for all reciprocal space work.
    MPI_Comm mpiCommunicator_;
    /// The total number of MPI nodes involved in reciprocal space work.
    int numNodes_;
    /// The MPI rank of this node.
    int myRank_;
    // The node grid dimensions are zero-initialized so that instances built by the default
    // constructor (and by split(), which uses it) never expose indeterminate values.
    /// The number of nodes in the X direction.
    int numNodesX_ = 0;
    /// The number of nodes in the Y direction.
    int numNodesY_ = 0;
    /// The number of nodes in the Z direction.
    int numNodesZ_ = 0;
    /// Throws unless the communicator size is exactly the product of the per-dimension counts.
    void assertNodePartitioningValid(int numNodes, int numNodesX, int numNodesY, int numNodesZ) const {
        if (numNodes != numNodesX * numNodesY * numNodesZ)
            throw std::runtime_error(
                "Communicator world size does not match the numNodesX, numNodesY, numNodesZ passed in.");
    }
    /// Default constructor; yields an inert wrapper with no communicator attached.
    MPIWrapper() : mpiCommunicator_(0), numNodes_(0), myRank_(0) {}
    /*!
     * \brief Duplicates the given communicator (so this wrapper owns its own handle) and
     *        queries the node count and this node's rank.
     * \param communicator the MPI communicator to duplicate.
     * \param numNodesX the number of nodes in the X direction.
     * \param numNodesY the number of nodes in the Y direction.
     * \param numNodesZ the number of nodes in the Z direction.
     */
    MPIWrapper(const MPI_Comm& communicator, int numNodesX, int numNodesY, int numNodesZ)
        : numNodesX_(numNodesX), numNodesY_(numNodesY), numNodesZ_(numNodesZ) {
        if (MPI_Comm_dup(communicator, &mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_dup in MPIWrapper constructor.");
        if (MPI_Comm_size(mpiCommunicator_, &numNodes_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_size in MPIWrapper constructor.");
        if (MPI_Comm_rank(mpiCommunicator_, &myRank_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_rank in MPIWrapper constructor.");
        assertNodePartitioningValid(numNodes_, numNodesX, numNodesY, numNodesZ);
    }
    ~MPIWrapper() {
        // Only free a communicator this wrapper actually acquired (dup or split).
        if (mpiCommunicator_) MPI_Comm_free(&mpiCommunicator_);
    }
    /*!
     * \brief barrier wait for all members of this communicator to reach this point.
     */
    void barrier() {
        if (MPI_Barrier(mpiCommunicator_) != MPI_SUCCESS) throw std::runtime_error("Problem in MPI Barrier call!");
    }
    /*!
     * \brief split split this communicator into subgroups.
     * \param color the number identifying the subgroup the new communicator belongs to.
     * \param key the rank of the new communicator within the subgroup.
     * \return the new communicator.
     */
    std::unique_ptr<MPIWrapper> split(int color, int key) {
        std::unique_ptr<MPIWrapper> newWrapper(new MPIWrapper);
        if (MPI_Comm_split(mpiCommunicator_, color, key, &newWrapper->mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_split in MPIWrapper split.");
        if (MPI_Comm_size(newWrapper->mpiCommunicator_, &newWrapper->numNodes_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_size in MPIWrapper split.");
        if (MPI_Comm_rank(newWrapper->mpiCommunicator_, &newWrapper->myRank_) != MPI_SUCCESS)
            throw std::runtime_error("Problem calling MPI_Comm_rank in MPIWrapper split.");
        return newWrapper;
    }
    /*!
     * \brief allToAll perform alltoall communication within this communicator.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be communicated.
     */
    void allToAll(std::complex<Real>* inBuffer, std::complex<Real>* outBuffer, int dimension) {
        // Each complex element is sent as two reals, hence the factor of 2.
        if (MPI_Alltoall(inBuffer, 2 * dimension, types_.realType_, outBuffer, 2 * dimension, types_.realType_,
                         mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI alltoall.");
    }
    /*!
     * \brief allToAll perform alltoall communication within this communicator.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be communicated.
     */
    void allToAll(Real* inBuffer, Real* outBuffer, int dimension) {
        if (MPI_Alltoall(inBuffer, dimension, types_.realType_, outBuffer, dimension, types_.realType_,
                         mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI alltoall.");
    }
    /*!
     * \brief reduce performs a reduction, with summation as the operation.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be reduced.
     * \param node the node to reduce the result to (defaulted to zero).
     */
    void reduce(Real* inBuffer, Real* outBuffer, int dimension, int node = 0) {
        if (MPI_Reduce(inBuffer, outBuffer, dimension, types_.realType_, MPI_SUM, node, mpiCommunicator_) !=
            MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI reduce.");
    }
    /*!
     * \brief reduceScatterBlock performs a reduction, with summation as the operation, then scatters to all nodes.
     * \param inBuffer the buffer containing input data.
     * \param outBuffer the buffer to send results to.
     * \param dimension the number of elements to be reduced on each node (currently must be the same on all nodes).
     */
    void reduceScatterBlock(Real* inBuffer, Real* outBuffer, int dimension) {
        if (MPI_Reduce_scatter_block(inBuffer, outBuffer, dimension, types_.realType_, MPI_SUM, mpiCommunicator_) !=
            MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI reducescatter.");
    }
    /*!
     * \brief allGather broadcasts a chunk of data from each node to every other node.
     * \param inBuffer the buffer containing input data.
     * \param dimension the number of elements to be broadcast.
     * \param outBuffer the buffer to send results to.
     */
    void allGather(Real* inBuffer, Real* outBuffer, int dimension) {
        if (MPI_Allgather(inBuffer, dimension, types_.realType_, outBuffer, dimension, types_.realType_,
                          mpiCommunicator_) != MPI_SUCCESS)
            throw std::runtime_error("Problem encountered calling MPI allgather.");
    }
    /*!
     * \brief operator << a convenience wrapper around ostream, to inject node info.
     */
    friend std::ostream& operator<<(std::ostream& os, const MPIWrapper& obj) {
        os << "Node " << obj.myRank_ << " of " << obj.numNodes_ << ":" << std::endl;
        return os;
    }
};
// Adapter to allow piping of streams into unique_ptr-held object
template <typename Real>
std::ostream& operator<<(std::ostream& os, const std::unique_ptr<MPIWrapper<Real>>& obj) {
    // Simply delegate to MPIWrapper's own stream operator.
    return os << *obj;
}
// A convenience macro to guarantee that each node prints in order.
#define PRINT(out) \
if (mpiCommunicator_) { \
for (int node = 0; node < mpiCommunicator_->numNodes_; ++node) { \
std::cout.setf(std::ios::fixed, std::ios::floatfield); \
if (node == mpiCommunicator_->myRank_) \
std::cout << mpiCommunicator_ << std::setw(18) << std::setprecision(10) << out << std::endl; \
mpiCommunicator_->barrier(); \
}; \
} else { \
std::cout << std::setw(18) << std::setprecision(10) << out << std::endl; \
}
} // Namespace helpme
#endif // Header guard
#endif
// original file: ../src/powers.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_POWERS_H_
#define _HELPME_STANDALONE_POWERS_H_
#include <cmath>
/*!
* \file powers.h
* \brief Contains template functions to compute various quantities raised to an integer power.
*/
namespace helpme {
template <typename Real, int n>
struct raiseToIntegerPower {
static Real pow(Real val) { return val * raiseToIntegerPower<Real, n - 1>::pow(val); }
};
/// Base recursion for the power.
template <typename Real>
struct raiseToIntegerPower<Real, 0> {
static Real pow(Real) { return 1; }
};
/// n is positive and even case
// val is the *square* of the norm, so an even power n needs only n/2 multiplications of val.
template <typename Real, int n, bool nIsPositive, bool nIsEven>
struct normIntegerPowerComputer {
    static Real compute(Real val) { return raiseToIntegerPower<Real, n / 2>::pow(val); }
};
/// n is positive and odd case
// Odd powers require the norm itself, obtained via one square root of the squared norm.
template <typename Real, int n>
struct normIntegerPowerComputer<Real, n, true, false> {
    static Real compute(Real val) { return raiseToIntegerPower<Real, n>::pow(std::sqrt(val)); }
};
/// n is negative and even case
// Negative powers exponentiate the reciprocal of the squared norm.
template <typename Real, int n>
struct normIntegerPowerComputer<Real, n, false, true> {
    static Real compute(Real val) { return raiseToIntegerPower<Real, -n / 2>::pow(1 / val); }
};
/// n is negative and odd case
template <typename Real, int n>
struct normIntegerPowerComputer<Real, n, false, false> {
    // std::sqrt (rather than the unqualified C library sqrt) keeps the computation in Real
    // precision and matches the positive-odd specialization above.
    static Real compute(Real val) { return raiseToIntegerPower<Real, -n>::pow(1 / std::sqrt(val)); }
};
/*!
 * \brief Compute a quantity exponentiated by an integer power, using multiplication,
 *        at compile time. The exponent may be positive or negative; the specializations
 *        above handle each sign/parity combination.
 * \tparam Real the floating point type to use for arithmetic.
 * \tparam n the exponent to raise the value to.
 */
template <typename Real, int n>
struct raiseNormToIntegerPower {
    /*!
     * \brief pow compute the norm raised to the power n.
     * \param val the square of the norm to be exponentiated.
     * \return the norm raised to the integer power.
     */
    static Real compute(Real val) { return normIntegerPowerComputer<Real, n, (n >= 0), (n % 2 == 0)>::compute(val); }
};
} // Namespace helpme
#endif // Header guard
// original file: ../src/splines.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_SPLINES_H_
#define _HELPME_STANDALONE_SPLINES_H_
// #include "matrix.h"
/*!
* \file splines.h
* \brief Contains the C++ implementation of a cardinal B-Splines.
*/
namespace helpme {
/*!
* \class BSpline
* \brief A class to compute cardinal B-splines. This code can compute arbitrary-order B-splines of
* arbitrary derivative level, subject to the usual constraint that an order m spline is
* differentiable m-2 times.
* \tparam Real the floating point type to use for arithmetic.
*/
template <typename Real>
class BSpline {
protected:
/// The order of this B-spline.
short order_;
/// The maximum derivative level for this B-spline.
short derivativeLevel_;
/// B-Splines with rows corresponding to derivative level, and columns to spline component.
Matrix<Real> splines_;
/// The grid point at which to start interpolation.
short startingGridPoint_;
/// Makes B-Spline array.
inline void makeSplineInPlace(Real *array, const Real &val, const short &n) const {
Real denom = (Real)1 / (n - 1);
array[n - 1] = denom * val * array[n - 2];
for (short j = 1; j < n - 1; ++j)
array[n - j - 1] = denom * ((val + j) * array[n - j - 2] + (n - j - val) * array[n - j - 1]);
array[0] *= denom * (1 - val);
}
/// Takes BSpline derivative.
inline void differentiateSpline(const Real *array, Real *dArray, const short &n) const {
dArray[0] = -array[0];
for (short j = 1; j < n - 1; ++j) dArray[j] = array[j - 1] - array[j];
dArray[n - 1] = array[n - 2];
}
/*!
* \brief assertSplineIsSufficient ensures that the spline is large enough to be differentiable.
* An mth order B-Spline is differentiable m-2 times.
*/
void assertSplineIsSufficient(int splineOrder, int derivativeLevel) const {
if (splineOrder - derivativeLevel < 2) {
std::string msg(
"The spline order used is not sufficient for the derivative level requested."
"Set the spline order to at least ");
msg += std::to_string(derivativeLevel + 2);
msg += " to run this calculation.";
throw std::runtime_error(msg);
}
}
public:
/// The B-splines and their derivatives. See update() for argument details.
BSpline(short start, Real value, short order, short derivativeLevel) : splines_(derivativeLevel + 1, order) {
update(start, value, order, derivativeLevel);
}
/*!
* \brief update computes information for BSpline, without reallocating memory unless needed.
* \param start the grid point at which to start interpolation.
* \param value the distance (in fractional coordinates) from the starting grid point.
* \param order the order of the BSpline.
* \param derivativeLevel the maximum level of derivative needed for this BSpline.
*/
void update(short start, Real value, short order, short derivativeLevel) {
assertSplineIsSufficient(order, derivativeLevel);
startingGridPoint_ = start;
order_ = order;
derivativeLevel_ = derivativeLevel;
// The +1 is to account for the fact that we need to store entries up to and including the max.
if (splines_.nRows() < derivativeLevel + 1 || splines_.nCols() != order)
splines_ = Matrix<Real>(derivativeLevel + 1, order);
splines_.setZero();
splines_(0, 0) = 1 - value;
splines_(0, 1) = value;
for (short m = 1; m < order_ - 1; ++m) {
makeSplineInPlace(splines_[0], value, m + 2);
if (m >= order_ - derivativeLevel_ - 2) {
short currentDerivative = order_ - m - 2;
for (short l = 0; l < currentDerivative; ++l)
differentiateSpline(splines_[l], splines_[l + 1], m + 2 + currentDerivative);
}
}
}
BSpline() {}
/*!
* \brief The modulus of the B-Spline in Fourier space.
* \param gridDim the dimension of the grid in the dimension this spline is to be used.
* \param mValues if provided, provides the ordering of the m values, if not they are
* ordered as 0, 1, 2, ..., Kmax, -Kmax+1, -Kmax+2, ..., -2, -1.
* \return a gridDim long vector containing the inverse of the Fourier space spline moduli.
*/
helpme::vector<Real> invSplineModuli(short gridDim, std::vector<int> mValues = {}) {
int nKTerms = mValues.size() ? mValues.size() : gridDim;
helpme::vector<Real> splineMods(nKTerms, 0);
Real prefac = 2 * M_PI / gridDim;
for (int m = 0; m < nKTerms; ++m) {
Real real = 0;
Real imag = 0;
int mValue = mValues.size() ? mValues[m] : m;
for (int n = 0; n < order_; ++n) {
Real exparg = mValue * n * prefac;
Real jSpline = splines_(0, n);
real += jSpline * cos(exparg);
imag += jSpline * sin(exparg);
}
splineMods[m] = real * real + imag * imag;
}
// Correct tiny values for conventional PME.
if (!mValues.size()) {
constexpr Real EPS = 1e-7f;
if (splineMods[0] < EPS) splineMods[0] = splineMods[1] / 2;
for (int i = 0; i < gridDim - 1; ++i)
if (splineMods[i] < EPS) splineMods[i] = (splineMods[i - 1] + splineMods[i + 1]) / 2;
if (splineMods[gridDim - 1] < EPS) splineMods[gridDim - 1] = splineMods[gridDim - 2] / 2;
}
// Invert, to avoid division later on.
for (int i = 0; i < nKTerms; ++i) splineMods[i] = 1 / splineMods[i];
return splineMods;
}
/*!
* \brief Gets the grid point to start interpolating from.
* \return the index of the first grid point this spline supports.
*/
short startingGridPoint() const { return startingGridPoint_; }
/*!
 * \brief Accesses the B-Spline, or a derivative thereof.
 * \param deriv the derivative level requested (0 for the spline values themselves).
 * \return a pointer to the requested derivative row of the spline data.
 */
const Real *operator[](const int &deriv) const {
    return splines_[deriv];
}
/*!
 * \brief Read-only access to the full spline data.
 * \return a const reference to the spline data matrix; the row index is the derivative level,
 *         the column index is the spline component.
 */
const Matrix<Real> &splineData() const {
    return splines_;
}
};
} // Namespace helpme
#endif // Header guard
// #include "string_utils.h"
// original file: ../src/tensor_utils.h
// BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_STANDALONE_TENSOR_UTILS_H_
#define _HELPME_STANDALONE_TENSOR_UTILS_H_
#if HAVE_BLAS == 1
extern "C" {
extern void dgemm_(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *,
int *);
extern void sgemm_(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *,
int *);
}
#endif
namespace helpme {
/*!
* \brief Sorts a 3D tensor stored contiguously as ABC into CBA order.
* \param abcPtr the address of the incoming ABC ordered tensor.
* \param aDimension the dimension of the A index.
* \param bDimension the dimension of the B index.
* \param cDimension the dimension of the C index.
* \param cbaPtr the address of the outgoing CBA ordered tensor.
* \param nThreads the number of parallel threads to use.
*/
template <typename Real>
void permuteABCtoCBA(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension, int const cDimension,
                     Real *__restrict__ cbaPtr, size_t nThreads = 1) {
    // Walk the output in storage order (C slowest, A fastest) and gather from the ABC-ordered input.
#pragma omp parallel for num_threads(nThreads)
    for (int c = 0; c < cDimension; ++c)
        for (int b = 0; b < bDimension; ++b)
            for (int a = 0; a < aDimension; ++a)
                cbaPtr[(c * bDimension + b) * aDimension + a] = abcPtr[(a * bDimension + b) * cDimension + c];
}
/*!
* \brief Sorts a 3D tensor stored contiguously as ABC into ACB order.
* \param abcPtr the address of the incoming ABC ordered tensor.
* \param aDimension the dimension of the A index.
* \param bDimension the dimension of the B index.
* \param cDimension the dimension of the C index.
* \param acbPtr the address of the outgoing ACB ordered tensor.
* \param nThreads the number of parallel threads to use.
*/
template <typename Real>
void permuteABCtoACB(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension, int const cDimension,
                     Real *__restrict__ acbPtr, size_t nThreads = 1) {
    // Walk the output in storage order (A slowest, B fastest) and gather from the ABC-ordered input.
#pragma omp parallel for num_threads(nThreads)
    for (int a = 0; a < aDimension; ++a)
        for (int c = 0; c < cDimension; ++c)
            for (int b = 0; b < bDimension; ++b)
                acbPtr[(a * cDimension + c) * bDimension + b] = abcPtr[(a * bDimension + b) * cDimension + c];
}
/*!
* \brief Contracts an ABxC tensor with a DxC tensor, to produce an ABxD quantity.
* \param abcPtr the address of the incoming ABxC tensor.
* \param dcPtr the address of the incoming DxC tensor.
* \param abDimension the dimension of the AB index.
* \param cDimension the dimension of the C index.
* \param dDimension the dimension of the D index.
* \param abdPtr the address of the outgoing ABD tensor.
*/
template <typename Real>
void contractABxCWithDxC(Real const *__restrict__ abcPtr, Real const *__restrict__ dcPtr, int const abDimension,
                         int const cDimension, int const dDimension, Real *__restrict__ abdPtr) {
    // abd[ab][d] = sum_c abc[ab][c] * dc[d][c]; both inputs are row major with C fastest.
    for (int ab = 0; ab < abDimension; ++ab) {
        Real const *abcRow = abcPtr + ab * cDimension;
        Real *abdRow = abdPtr + ab * dDimension;
        for (int d = 0; d < dDimension; ++d) {
            Real const *dcRow = dcPtr + d * cDimension;
            Real dot = 0;
            for (int c = 0; c < cDimension; ++c) dot = dot + abcRow[c] * dcRow[c];
            abdRow[d] = dot;
        }
    }
}
#if HAVE_BLAS == 1
// BLAS-accelerated specialization for single precision.  The tensors here are row major while
// {s,d}gemm_ is column major, so we ask BLAS for abd^T = dc . abc^T (in column-major terms), which
// is byte-for-byte the row-major abd = abc . dc^T that we want.
template <>
void contractABxCWithDxC<float>(float const *__restrict__ abcPtr, float const *__restrict__ dcPtr,
                                int const abDimension, int const cDimension, int const dDimension,
                                float *__restrict__ abdPtr) {
    // Nothing to do for empty tensors.
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char transB = 't';
    char transA = 'n';
    float alpha = 1;
    float beta = 0;
    // const_cast is required by the Fortran77 interface, which takes non-const pointers; the
    // transA/transB/dimension/input arguments are not modified by the call.
    sgemm_(&transB, &transA, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &alpha, const_cast<float *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<float *>(abcPtr), const_cast<int *>(&cDimension), &beta, abdPtr, const_cast<int *>(&dDimension));
}
// BLAS-accelerated specialization for double precision.  See the float specialization: the
// row-major product abd = abc . dc^T is obtained from column-major dgemm by swapping the operands.
template <>
void contractABxCWithDxC<double>(double const *__restrict__ abcPtr, double const *__restrict__ dcPtr,
                                 int const abDimension, int const cDimension, int const dDimension,
                                 double *__restrict__ abdPtr) {
    // Nothing to do for empty tensors.
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char transB = 't';
    char transA = 'n';
    double alpha = 1;
    double beta = 0;
    // const_cast is required by the Fortran77 interface, which takes non-const pointers; the
    // transA/transB/dimension/input arguments are not modified by the call.
    dgemm_(&transB, &transA, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &alpha, const_cast<double *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<double *>(abcPtr), const_cast<int *>(&cDimension), &beta, abdPtr, const_cast<int *>(&dDimension));
}
#endif
} // Namespace helpme
#endif // Header guard
/*!
* \file helpme.h
* \brief Contains the C++ implementation of a PME Instance, and related helper classes.
*/
namespace helpme {
/*!
* \brief nCartesian computes the total number of Cartesian components of a given angular momentum.
* \param L the angular momentum.
* \return total number of components up to and including angular momentum L.
*/
// The count of all Cartesian components with angular momentum 0..L is the tetrahedral number
// (L+1)(L+2)(L+3)/6.
static int nCartesian(int L) {
    const int lp1 = L + 1;
    return lp1 * (lp1 + 1) * (lp1 + 2) / 6;
}
/*!
* \brief cartAddress computes the address of a term with given quantum numbers in a Cartesian buffer.
* \param lx the x quantum number.
* \param ly the y quantum number.
* \param lz the z quantum number.
* \return the address of an {lx, ly, lz} quantity in a buffer that contains all lower angular momentum terms too.
*/
static int cartAddress(int lx, int ly, int lz) {
    const int l = lx + ly + lz;
    // All components of lower total angular momentum precede this shell.
    const int shellOffset = l * (l + 1) * (l + 2) / 6;
    // Within the shell, components are grouped by increasing lz, then by increasing ly.
    const int zOffset = lz * (2 * l - lz + 3) / 2;
    return shellOffset + zOffset + ly;
}
// This is used to define function pointers in the constructor, and makes it easy to add new kernels.
// For a given inverse-distance exponent n, instantiates the templated convolution, influence-function,
// self-, direct-, and adjusted-energy kernels, and binds them to the corresponding std::function
// members.  Each expansion forms one `case` body of a switch over the requested exponent.
#define ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(n)                  \
    case n:                                                          \
        convolveEVFxn_ = &convolveEVImpl<n>;                         \
        convolveEVCompressedFxn_ = &convolveEVCompressedImpl<n>;     \
        cacheInfluenceFunctionFxn_ = &cacheInfluenceFunctionImpl<n>; \
        slfEFxn_ = &slfEImpl<n>;                                     \
        dirEFxn_ = &dirEImpl<n>;                                     \
        adjEFxn_ = &adjEImpl<n>;                                     \
        dirEFFxn_ = &dirEFImpl<n>;                                   \
        adjEFFxn_ = &adjEFImpl<n>;                                   \
        break;
/*!
* \class splineCacheEntry
* \brief A placeholder to encapsulate information about a given atom's splines
*/
template <typename Real>
struct SplineCacheEntry {
    /// The cached splines along the A, B, and C lattice directions.
    BSpline<Real> aSpline, bSpline, cSpline;
    /// The absolute number of the atom associated with this cache entry (-1 until one is assigned).
    int absoluteAtomNumber = -1;
    /// Builds splines of the given order and derivative level, to be populated later.
    SplineCacheEntry(int order, int derivativeLevel)
        : aSpline(0, 0, order, derivativeLevel),
          bSpline(0, 0, order, derivativeLevel),
          cSpline(0, 0, order, derivativeLevel) {}
};
/*!
* \class PMEInstance
* \brief A class to encapsulate information related to a particle mesh Ewald calculation.
*
* By storing information related to a single PME calculation in this way, we allow multiple
* instances to be created in calculations requiring multiple PMEs, e.g. for computing both
* electrostatic and attractive dispersion terms using PME to handle long-range interactions.
* \tparam Real the floating point type to use for arithmetic.
*/
template <typename Real, typename std::enable_if<std::is_floating_point<Real>::value, int>::type = 0>
class PMEInstance {
using GridIterator = std::vector<std::vector<std::pair<short, short>>>;
using Complex = std::complex<Real>;
using Spline = BSpline<Real>;
using RealMat = Matrix<Real>;
using RealVec = helpme::vector<Real>;
public:
/*!
* \brief The algorithm being used to solve for the reciprocal space quantities.
*/
enum class AlgorithmType : int { Undefined = 0, PME = 1, CompressedPME = 2 };
/*!
* \brief The different conventions for orienting a lattice constructed from input parameters.
*/
enum class LatticeType : int { Undefined = 0, XAligned = 1, ShapeMatrix = 2 };
/*!
* \brief The different conventions for numbering nodes.
*/
enum class NodeOrder : int { Undefined = 0, ZYX = 1 };
protected:
/// The FFT grid dimensions in the {A,B,C} grid dimensions.
int gridDimensionA_ = 0, gridDimensionB_ = 0, gridDimensionC_ = 0;
/// The number of K vectors in the {A,B,C} dimensions. Equal to dim{A,B,C} for PME, lower for cPME.
int numKSumTermsA_ = 0, numKSumTermsB_ = 0, numKSumTermsC_ = 0;
/// The number of K vectors in the {A,B,C} dimensions to be handled by this node in a parallel setup.
int myNumKSumTermsA_ = 0, myNumKSumTermsB_ = 0, myNumKSumTermsC_ = 0;
/// The full A dimension after real->complex transformation.
int complexGridDimensionA_ = 0;
/// The locally owned A dimension after real->complex transformation.
int myComplexGridDimensionA_ = 0;
/// The order of the cardinal B-Spline used for interpolation.
int splineOrder_ = 0;
/// The actual number of threads per MPI instance, and the number requested previously.
int nThreads_ = -1, requestedNumberOfThreads_ = -1;
/// The exponent of the (inverse) interatomic distance used in this kernel.
int rPower_ = 0;
/// The scale factor to apply to all energies and derivatives.
Real scaleFactor_ = 0;
/// The attenuation parameter, whose units should be the inverse of those used to specify coordinates.
Real kappa_ = 0;
/// The lattice vectors.
RealMat boxVecs_ = RealMat(3, 3);
/// The reciprocal lattice vectors.
RealMat recVecs_ = RealMat(3, 3);
/// The scaled reciprocal lattice vectors, for transforming forces from scaled fractional coordinates.
RealMat scaledRecVecs_ = RealMat(3, 3);
/// A list of the number of splines handled by each thread on this node.
std::vector<size_t> numAtomsPerThread_;
/// An iterator over angular momentum components.
std::vector<std::array<short, 3>> angMomIterator_;
/// From a given starting point on the {A,B,C} edge of the grid, lists all points to be handled, correctly wrapping
/// around the end.
GridIterator gridIteratorA_, gridIteratorB_, gridIteratorC_;
/// The grid iterator for the C dimension, divided up by threads to avoid race conditions in parameter spreading.
std::vector<GridIterator> threadedGridIteratorC_;
/// The (inverse) bspline moduli to normalize the spreading / probing steps; these are folded into the convolution.
RealVec splineModA_, splineModB_, splineModC_;
/// The cached influence function involved in the convolution.
RealVec cachedInfluenceFunction_;
/// A function pointer to call the appropriate function to implement convolution with virial for conventional PME,
/// templated to the rPower value.
std::function<Real(bool, int, int, int, int, int, int, int, Real, Complex *, const RealMat &, Real, Real,
const Real *, const Real *, const Real *, const int *, const int *, const int *, RealMat &, int)>
convolveEVFxn_;
/// A function pointer to call the appropriate function to implement convolution with virial for compressed PME,
/// templated to the rPower value.
std::function<Real(int, int, int, int, int, int, Real, const Real *, Real *, const RealMat &, Real, Real,
const Real *, const Real *, const Real *, const int *, const int *, const int *, RealMat &, int)>
convolveEVCompressedFxn_;
/// A function pointer to call the appropriate function to implement caching of the influence function that appears
/// in the convolution, templated to the rPower value.
std::function<void(int, int, int, int, int, int, Real, RealVec &, const RealMat &, Real, Real, const Real *,
const Real *, const Real *, const int *, const int *, const int *, int)>
cacheInfluenceFunctionFxn_;
/// A function pointer to call the appropriate function to compute self energy, templated to the rPower value.
std::function<Real(int, Real, Real)> slfEFxn_;
/// A function pointer to call the appropriate function to compute the direct energy, templated to the rPower value.
std::function<Real(Real, Real)> dirEFxn_;
/// A function pointer to call the appropriate function to compute the adjusted energy, templated to the rPower
/// value.
std::function<Real(Real, Real)> adjEFxn_;
/// A function pointer to call the appropriate function to compute the direct energy and force, templated to the
/// rPower value.
std::function<std::tuple<Real, Real>(Real, Real, Real)> dirEFFxn_;
/// A function pointer to call the appropriate function to compute the adjusted energy and force, templated to the
/// rPower value.
std::function<std::tuple<Real, Real>(Real, Real, Real)> adjEFFxn_;
#if HAVE_MPI == 1
/// The communicator object that handles interactions with MPI.
std::unique_ptr<MPIWrapper<Real>> mpiCommunicator_;
/// The communicator object that handles interactions with MPI along this nodes {A,B,C} pencils.
std::unique_ptr<MPIWrapper<Real>> mpiCommunicatorA_, mpiCommunicatorB_, mpiCommunicatorC_;
#endif
/// The number of nodes in the {A,B,C} dimensions.
int numNodesA_ = 1, numNodesB_ = 1, numNodesC_ = 1;
/// The rank of this node along the {A,B,C} dimensions.
int myNodeRankA_ = 0, myNodeRankB_ = 0, myNodeRankC_ = 0;
/// The first grid point that this node is responsible for in the {A,B,C} dimensions.
int myFirstGridPointA_ = 0, myFirstGridPointB_ = 0, myFirstGridPointC_ = 0;
/// The first K sum term that this node is responsible for.
int firstKSumTermA_ = 0, firstKSumTermB_ = 0, firstKSumTermC_ = 0;
/// The {X,Y,Z} dimensions of the locally owned chunk of the grid.
int myGridDimensionA_ = 0, myGridDimensionB_ = 0, myGridDimensionC_ = 0;
/// The subsets of a given dimension to be processed when doing a transform along another dimension.
int subsetOfCAlongA_ = 0, subsetOfCAlongB_ = 0, subsetOfBAlongC_ = 0;
/// The size of a cache line, in units of the size of the Real type, to allow better memory allocation policies.
Real cacheLineSizeInReals_ = 0;
/// The current unit cell parameters.
Real cellA_ = 0, cellB_ = 0, cellC_ = 0, cellAlpha_ = 0, cellBeta_ = 0, cellGamma_ = 0;
/// Whether the unit cell parameters have been changed, invalidating cached gF quantities.
bool unitCellHasChanged_ = true;
/// Whether the kappa has been changed, invalidating kappa-dependent quantities.
bool kappaHasChanged_ = true;
/// Whether any of the grid dimensions have changed.
bool gridDimensionHasChanged_ = true;
/// Whether any of the reciprocal sum dimensions have changed.
bool reciprocalSumDimensionHasChanged_ = true;
/// Whether the algorithm to be used has changed.
bool algorithmHasChanged_ = true;
/// Whether the spline order has changed.
bool splineOrderHasChanged_ = true;
/// Whether the scale factor has changed.
bool scaleFactorHasChanged_ = true;
/// Whether the power of R has changed.
bool rPowerHasChanged_ = true;
/// Whether the parallel node setup has changed in any way.
bool numNodesHasChanged_ = true;
/// The algorithm being used to solve for reciprocal space quantities.
AlgorithmType algorithmType_ = AlgorithmType::Undefined;
/// The type of alignment scheme used for the lattice vectors.
LatticeType latticeType_ = LatticeType::Undefined;
/// Communication buffers for MPI parallelism.
helpme::vector<Complex> workSpace1_, workSpace2_;
/// FFTW wrappers to help with transformations in the {A,B,C} dimensions.
FFTWWrapper<Real> fftHelperA_, fftHelperB_, fftHelperC_;
/// The cached list of splines.
std::vector<SplineCacheEntry<Real>> splineCache_;
/// A scratch array for each threads to use as storage when probing the grid.
RealMat fractionalPhis_;
/// A list of the splines that each thread should handle.
std::vector<std::list<size_t>> splinesPerThread_;
/// The transformation matrices for the compressed PME algorithms, in the {A,B,C} dimensions.
RealMat compressionCoefficientsA_, compressionCoefficientsB_, compressionCoefficientsC_;
/// Iterators that define the reciprocal lattice sums over each index, correctly defining -1/2 <= m{A,B,C} < 1/2.
std::vector<int> mValsA_, mValsB_, mValsC_;
/// A temporary list used in the assigning of atoms to threads and resorting by starting grid point.
std::vector<std::set<std::pair<uint32_t, uint32_t>>> gridAtomList_;
/*!
* \brief makeGridIterator makes an iterator over the spline values that contribute to this node's grid
* in a given Cartesian dimension. The iterator is of the form (grid point, spline index) and is
* sorted by increasing grid point, for cache efficiency.
* \param dimension the dimension of the grid in the Cartesian dimension of interest.
* \param first the first grid point in the Cartesian dimension to be handled by this node.
* \param last the element past the last grid point in the Cartesian dimension to be handled by this node.
* \param paddingSize the size of the "halo" region around this grid onto which the charge can be spread
* that really belongs to neighboring nodes. For compressed PME we assume that each node handles
* only its own atoms and spreads onto an expanded grid to account for this. In regular PME there
* is no padding because we assume that all halo atoms are present on this node before spreading.
* \return the vector of spline iterators for each starting grid point.
*/
GridIterator makeGridIterator(int dimension, int first, int last, int paddingSize) const {
    GridIterator gridIterator;
    gridIterator.reserve(dimension);
    if (paddingSize) {
        // This version assumes that every atom on this node is blindly placed on the grid,
        // requiring that a padding area of size splineOrder-1 be present, so no wraparound is
        // applied and only starting points fully inside this node's range contribute.
        for (int gridStart = 0; gridStart < dimension; ++gridStart) {
            // Was: vector(splineOrder_) followed by clear() — an allocate-and-discard; reserve instead.
            std::vector<std::pair<short, short>> splineIterator;
            splineIterator.reserve(splineOrder_);
            if (gridStart >= first && gridStart < last - paddingSize) {
                for (int splineIndex = 0; splineIndex < splineOrder_; ++splineIndex) {
                    int gridPoint = splineIndex + gridStart;
                    splineIterator.emplace_back(gridPoint - first, splineIndex);
                }
            }
            splineIterator.shrink_to_fit();
            // Entries are generated in increasing grid-point order already, so no sort is needed here.
            gridIterator.push_back(std::move(splineIterator));
        }
    } else {
        // This version assumes that each node has its own atoms, plus "halo" atoms from
        // neighboring grids, so spline contributions wrap around the periodic boundary and are
        // sorted by increasing local grid point for cache efficiency.
        for (int gridStart = 0; gridStart < dimension; ++gridStart) {
            std::vector<std::pair<short, short>> splineIterator;
            splineIterator.reserve(splineOrder_);
            for (int splineIndex = 0; splineIndex < splineOrder_; ++splineIndex) {
                int gridPoint = (splineIndex + gridStart) % dimension;
                if (gridPoint >= first && gridPoint < last)
                    splineIterator.emplace_back(gridPoint - first, splineIndex);
            }
            splineIterator.shrink_to_fit();
            std::sort(splineIterator.begin(), splineIterator.end());
            gridIterator.push_back(std::move(splineIterator));
        }
    }
    gridIterator.shrink_to_fit();
    return gridIterator;
}
/*! Make sure that the iterator over AM components is up to date.
* \param angMom the angular momentum required for the iterator over multipole components.
*/
void updateAngMomIterator(int parameterAngMom) {
    const int maxAM = parameterAngMom;
    const size_t requiredTerms = nCartesian(maxAM);
    // Already big enough for this angular momentum; nothing to rebuild.
    if (angMomIterator_.size() >= requiredTerms) return;
    angMomIterator_.resize(requiredTerms);
    // Enumerate {lx, ly, lz} shell by shell: lz outermost, then ly, with lx fixed by the total.
    size_t address = 0;
    for (int l = 0; l <= maxAM; ++l) {
        for (int lz = 0; lz <= l; ++lz) {
            for (int ly = 0; ly + lz <= l; ++ly) {
                const int lx = l - ly - lz;
                angMomIterator_[address++] = {{static_cast<short>(lx), static_cast<short>(ly),
                                               static_cast<short>(lz)}};
            }
        }
    }
}
/*!
* \brief updateInfluenceFunction builds the gF array cache, if the lattice vector has changed since the last
* build of it. If the cell is unchanged, this does nothing. This is handled
* separately from other initializations because we may skip the cacheing of
* the influence function when the virial is requested; we assume it's an NPT
* calculation in this case and therefore the influence function changes every time.
*/
void updateInfluenceFunction() {
    // Rebuild only if something the influence function depends on has been modified.
    const bool cacheIsStale = unitCellHasChanged_ || kappaHasChanged_ || reciprocalSumDimensionHasChanged_ ||
                              splineOrderHasChanged_ || scaleFactorHasChanged_ || numNodesHasChanged_ ||
                              algorithmHasChanged_;
    if (!cacheIsStale) return;
    cacheInfluenceFunctionFxn_(myNumKSumTermsA_, myNumKSumTermsB_, myNumKSumTermsC_, firstKSumTermA_,
                               firstKSumTermB_, firstKSumTermC_, scaleFactor_, cachedInfluenceFunction_, recVecs_,
                               cellVolume(), kappa_, &splineModA_[0], &splineModB_[0], &splineModC_[0],
                               mValsA_.data(), mValsB_.data(), mValsC_.data(), nThreads_);
}
/*!
* \brief Spreads parameters onto the grid for a single atom
* \param atom the absolute atom number.
* \param realGrid pointer to the array containing the grid in CBA order
* \param nComponents the number of angular momentum components in the parameters.
* \param splineA the BSpline object for the A direction.
* \param splineB the BSpline object for the B direction.
* \param splineC the BSpline object for the C direction.
* \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
* etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL =
* (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param thread the ID of the thread handling this term.
*/
void spreadParametersImpl(const int &atom, Real *realGrid, const int &nComponents, const Spline &splineA,
                          const Spline &splineB, const Spline &splineC, const RealMat &parameters, int thread) {
    // Spline support for this atom in each lattice direction, as (local grid point, spline index) pairs.
    // The C-dimension iterator is per-thread so that no two threads write to the same C slab.
    const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
    const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
    const auto &cGridIterator = threadedGridIteratorC_[thread][splineC.startingGridPoint()];
    // Unpack to raw pointers and plain ints; the nested loops below are performance critical.
    int numPointsA = static_cast<int>(aGridIterator.size());
    int numPointsB = static_cast<int>(bGridIterator.size());
    int numPointsC = static_cast<int>(cGridIterator.size());
    const auto *iteratorDataA = aGridIterator.data();
    const auto *iteratorDataB = bGridIterator.data();
    const auto *iteratorDataC = cGridIterator.data();
    for (int component = 0; component < nComponents; ++component) {
        // The angular momentum quanta {lx, ly, lz} select which spline derivative row to use per dimension.
        const auto &quanta = angMomIterator_[component];
        Real param = parameters(atom, component);
        const Real *splineValsA = splineA[quanta[0]];
        const Real *splineValsB = splineB[quanta[1]];
        const Real *splineValsC = splineC[quanta[2]];
        for (int pointC = 0; pointC < numPointsC; ++pointC) {
            const auto &cPoint = iteratorDataC[pointC];
            // Hoist the loop-invariant partial products as we descend the C, B, A loops.
            Real cValP = param * splineValsC[cPoint.second];
            for (int pointB = 0; pointB < numPointsB; ++pointB) {
                const auto &bPoint = iteratorDataB[pointB];
                Real cbValP = cValP * splineValsB[bPoint.second];
                // Start of the (C,B) row of the grid; A is the fastest-running index (CBA storage).
                Real *cbRow = realGrid + cPoint.first * myGridDimensionB_ * myGridDimensionA_ +
                              bPoint.first * myGridDimensionA_;
                for (int pointA = 0; pointA < numPointsA; ++pointA) {
                    const auto &aPoint = iteratorDataA[pointA];
                    // Accumulate the separable spline product onto the grid.
                    cbRow[aPoint.first] += cbValP * splineValsA[aPoint.second];
                }
            }
        }
    }
}
/*!
* \brief Probes the grid and computes the force for a single atom, specialized for zero parameter angular momentum.
* \param potentialGrid pointer to the array containing the potential, in ZYX order.
* \param splineA the BSpline object for the A direction.
* \param splineB the BSpline object for the B direction.
* \param splineC the BSpline object for the C direction.
* \param parameter the list of parameter associated with the given atom.
* \param forces a 3 vector of the forces for this atom, ordered in memory as {Fx, Fy, Fz}.
*/
void probeGridImpl(const Real *potentialGrid, const Spline &splineA, const Spline &splineB, const Spline &splineC,
                   const Real &parameter, Real *forces) const {
    // Spline support for this atom in each lattice direction, as (local grid point, spline index) pairs.
    const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
    const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
    const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
    // We unpack the vector to raw pointers, as profiling shows that using range based for loops over vectors
    // causes a significant penalty in the innermost loop, primarily due to checking the loop stop condition.
    int numPointsA = static_cast<int>(aGridIterator.size());
    int numPointsB = static_cast<int>(bGridIterator.size());
    int numPointsC = static_cast<int>(cGridIterator.size());
    const auto *iteratorDataA = aGridIterator.data();
    const auto *iteratorDataB = bGridIterator.data();
    const auto *iteratorDataC = cGridIterator.data();
    // Row 0 of each spline holds the values; the first-derivative row starts splineOrder_ entries later
    // (the spline data is stored contiguously, row major).
    const Real *splineStartA0 = splineA[0];
    const Real *splineStartB0 = splineB[0];
    const Real *splineStartC0 = splineC[0];
    const Real *splineStartA1 = splineStartA0 + splineOrder_;
    const Real *splineStartB1 = splineStartB0 + splineOrder_;
    const Real *splineStartC1 = splineStartC0 + splineOrder_;
    // Ex/Ey/Ez accumulate the gradient of the potential in the fractional (A,B,C) coordinate basis.
    Real Ex = 0, Ey = 0, Ez = 0;
    for (int pointC = 0; pointC < numPointsC; ++pointC) {
        const auto &cPoint = iteratorDataC[pointC];
        const Real &splineC0 = splineStartC0[cPoint.second];
        const Real &splineC1 = splineStartC1[cPoint.second];
        for (int pointB = 0; pointB < numPointsB; ++pointB) {
            const auto &bPoint = iteratorDataB[pointB];
            const Real &splineB0 = splineStartB0[bPoint.second];
            const Real &splineB1 = splineStartB1[bPoint.second];
            // Start of the (C,B) row of the grid; A is the fastest-running index.
            const Real *cbRow = potentialGrid + cPoint.first * myGridDimensionA_ * myGridDimensionB_ +
                                bPoint.first * myGridDimensionA_;
            for (int pointA = 0; pointA < numPointsA; ++pointA) {
                const auto &aPoint = iteratorDataA[pointA];
                const Real &splineA0 = splineStartA0[aPoint.second];
                const Real &splineA1 = splineStartA1[aPoint.second];
                const Real &gridVal = cbRow[aPoint.first];
                // Each component differentiates along exactly one dimension (value * value * derivative).
                Ey += gridVal * splineA0 * splineB1 * splineC0;
                Ez += gridVal * splineA0 * splineB0 * splineC1;
                Ex += gridVal * splineA1 * splineB0 * splineC0;
            }
        }
    }
    // Transform the fractional-coordinate gradient to Cartesian components and accumulate the force.
    forces[0] -= parameter * (scaledRecVecs_[0][0] * Ex + scaledRecVecs_[0][1] * Ey + scaledRecVecs_[0][2] * Ez);
    forces[1] -= parameter * (scaledRecVecs_[1][0] * Ex + scaledRecVecs_[1][1] * Ey + scaledRecVecs_[1][2] * Ez);
    forces[2] -= parameter * (scaledRecVecs_[2][0] * Ex + scaledRecVecs_[2][1] * Ey + scaledRecVecs_[2][2] * Ez);
}
/*!
* \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum.
* \param potentialGrid pointer to the array containing the potential, in ZYX order.
* \param nPotentialComponents the number of components in the potential and its derivatives with one extra
* level of angular momentum to permit evaluation of forces.
* \param splineA the BSpline object for the A direction.
* \param splineB the BSpline object for the B direction.
* \param splineC the BSpline object for the C direction.
* \param phiPtr a scratch array of length nPotentialComponents, to store the fractional potential.
* N.B. Make sure that updateAngMomIterator() has been called first with the appropriate derivative
* level for the requested potential derivatives.
*/
void probeGridImpl(const Real *potentialGrid, const int &nPotentialComponents, const Spline &splineA,
                   const Spline &splineB, const Spline &splineC, Real *phiPtr) {
    // Spline support for this atom in each lattice direction, as (local grid point, spline index) pairs.
    const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
    const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
    const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
    // Spline data is stored contiguously row major: row d (of length splineOrder_) is the d'th derivative.
    const Real *splineStartA = splineA[0];
    const Real *splineStartB = splineB[0];
    const Real *splineStartC = splineC[0];
    for (const auto &cPoint : cGridIterator) {
        for (const auto &bPoint : bGridIterator) {
            // Start of the (C,B) row of the grid; A is the fastest-running index.
            const Real *cbRow = potentialGrid + cPoint.first * myGridDimensionA_ * myGridDimensionB_ +
                                bPoint.first * myGridDimensionA_;
            for (const auto &aPoint : aGridIterator) {
                Real gridVal = cbRow[aPoint.first];
                // Accumulate each requested potential derivative into phiPtr; note that phiPtr is NOT
                // zeroed here — the caller is expected to initialize it.
                for (int component = 0; component < nPotentialComponents; ++component) {
                    const auto &quanta = angMomIterator_[component];
                    const Real *splineValsA = splineStartA + quanta[0] * splineOrder_;
                    const Real *splineValsB = splineStartB + quanta[1] * splineOrder_;
                    const Real *splineValsC = splineStartC + quanta[2] * splineOrder_;
                    phiPtr[component] += gridVal * splineValsA[aPoint.second] * splineValsB[bPoint.second] *
                                         splineValsC[cPoint.second];
                }
            }
        }
    }
}
/*!
* \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum.
* \param atom the absolute atom number.
* \param potentialGrid pointer to the array containing the potential, in ZYX order.
* \param nComponents the number of angular momentum components in the parameters.
* \param nForceComponents the number of angular momentum components in the parameters with one extra
* level of angular momentum to permit evaluation of forces.
* \param splineA the BSpline object for the A direction.
* \param splineB the BSpline object for the B direction.
* \param splineC the BSpline object for the C direction.
* \param phiPtr a scratch array of length nForceComponents, to store the fractional potential.
* \param fracParameters the list of parameters associated with the current atom, in
* the scaled fractional coordinate basis (charges, C6 coefficients,
* multipoles, etc...). For a parameter with angular momentum L, a matrix
* of dimension nAtoms x nL is expected, where
* nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
*/
void probeGridImpl(const int &atom, const Real *potentialGrid, const int &nComponents, const int &nForceComponents,
const Spline &splineA, const Spline &splineB, const Spline &splineC, Real *phiPtr,
const Real *fracParameters, Real *forces) {
std::fill(phiPtr, phiPtr + nForceComponents, 0);
probeGridImpl(potentialGrid, nForceComponents, splineA, splineB, splineC, phiPtr);
Real fracForce[3] = {0, 0, 0};
for (int component = 0; component < nComponents; ++component) {
Real param = fracParameters[component];
const auto &quanta = angMomIterator_[component];
short lx = quanta[0];
short ly = quanta[1];
short lz = quanta[2];
fracForce[0] -= param * phiPtr[cartAddress(lx + 1, ly, lz)];
fracForce[1] -= param * phiPtr[cartAddress(lx, ly + 1, lz)];
fracForce[2] -= param * phiPtr[cartAddress(lx, ly, lz + 1)];
}
forces[0] += scaledRecVecs_[0][0] * fracForce[0] + scaledRecVecs_[0][1] * fracForce[1] +
scaledRecVecs_[0][2] * fracForce[2];
forces[1] += scaledRecVecs_[1][0] * fracForce[0] + scaledRecVecs_[1][1] * fracForce[1] +
scaledRecVecs_[1][2] * fracForce[2];
forces[2] += scaledRecVecs_[2][0] * fracForce[0] + scaledRecVecs_[2][1] * fracForce[1] +
scaledRecVecs_[2][2] * fracForce[2];
}
/*!
* \brief assertInitialized makes sure that setup() has been called before running any calculations.
*/
void assertInitialized() const {
    // rPower_ stays zero until one of the setup routines runs, so it serves as the initialization sentinel.
    if (rPower_) return;
    throw std::runtime_error(
        "Either setup(...) or setup_parallel(...) must be called before computing anything.");
}
/*!
* \brief makeBSplines construct the {x,y,z} B-Splines.
* \param atomCoords a 3-vector containing the atom's coordinates.
* \param derivativeLevel level of derivative needed for the splines.
* \return a 3-tuple containing the {x,y,z} B-splines.
*/
std::tuple<Spline, Spline, Spline> makeBSplines(const Real *atomCoords, short derivativeLevel) const {
// Subtract a tiny amount to make sure we're not exactly on the rightmost (excluded)
// grid point. The calculation is translationally invariant, so this is valid.
constexpr float EPS = 1e-6f;
Real aCoord =
atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) + atomCoords[2] * recVecs_(2, 0) - EPS;
Real bCoord =
atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) + atomCoords[2] * recVecs_(2, 1) - EPS;
Real cCoord =
atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) + atomCoords[2] * recVecs_(2, 2) - EPS;
// Make sure the fractional coordinates fall in the range 0 <= s < 1
aCoord -= floor(aCoord);
bCoord -= floor(bCoord);
cCoord -= floor(cCoord);
short aStartingGridPoint = gridDimensionA_ * aCoord;
short bStartingGridPoint = gridDimensionB_ * bCoord;
short cStartingGridPoint = gridDimensionC_ * cCoord;
Real aDistanceFromGridPoint = gridDimensionA_ * aCoord - aStartingGridPoint;
Real bDistanceFromGridPoint = gridDimensionB_ * bCoord - bStartingGridPoint;
Real cDistanceFromGridPoint = gridDimensionC_ * cCoord - cStartingGridPoint;
return std::make_tuple(Spline(aStartingGridPoint, aDistanceFromGridPoint, splineOrder_, derivativeLevel),
Spline(bStartingGridPoint, bDistanceFromGridPoint, splineOrder_, derivativeLevel),
Spline(cStartingGridPoint, cDistanceFromGridPoint, splineOrder_, derivativeLevel));
}
    /*!
     * \brief convolveEVImpl performs the reciprocal space convolution, returning the energy, for conventional PME.
     *        We opt to not cache this the same way as the non-virial version because it's safe to assume that if
     *        the virial is requested the box is likely to change, which renders the cache useless.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param useConjugateSymmetry whether to use the complex conjugate symmetry in the convolution or not.
     * \param fullNx full (complex) dimension of the reciprocal sum in the X direction.
     * \param myNx the subset of the reciprocal sum in the x direction to be handled by this node.
     * \param myNy the subset of the reciprocal sum in the y direction to be handled by this node.
     * \param myNz the subset of the reciprocal sum in the z direction to be handled by this node.
     * \param startX the starting reciprocal sum term handled by this node in the X direction.
     * \param startY the starting reciprocal sum term handled by this node in the Y direction.
     * \param startZ the starting reciprocal sum term handled by this node in the Z direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     *        1 / [4 pi epsilon0] for Coulomb calculations).
     * \param gridPtr the Fourier space grid, with ordering YXZ; it is convolved in place.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param xMVals the integer prefactors to iterate over reciprocal vectors in the x dimension.
     * \param yMVals the integer prefactors to iterate over reciprocal vectors in the y dimension.
     * \param zMVals the integer prefactors to iterate over reciprocal vectors in the z dimension.
     * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
     *        This vector is incremented, not assigned.
     * \param nThreads the number of OpenMP threads to use.
     * \return the reciprocal space energy.
     */
    template <int rPower>
    static Real convolveEVImpl(bool useConjugateSymmetry, int fullNx, int myNx, int myNy, int myNz, int startX,
                               int startY, int startZ, Real scaleFactor, Complex *gridPtr, const RealMat &boxInv,
                               Real volume, Real kappa, const Real *xMods, const Real *yMods, const Real *zMods,
                               const int *xMVals, const int *yMVals, const int *zMVals, RealMat &virial, int nThreads) {
        Real energy = 0;
        bool nodeZero = startX == 0 && startY == 0 && startZ == 0;
        if (rPower > 3 && nodeZero) {
            // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real prefac = 2 * scaleFactor * HELPME_PI * HELPME_SQRTPI * pow(kappa, rPower - 3) /
                          ((rPower - 3) * gammaComputer<Real, rPower>::value * volume);
            energy += prefac * (gridPtr[0].real() * gridPtr[0].real() + gridPtr[0].imag() * gridPtr[0].imag());
        }
        // Ensure the m=0 term convolution product is zeroed for the backtransform; it's been accounted for above.
        if (nodeZero) gridPtr[0] = Complex(0, 0);
        Real bPrefac = HELPME_PI * HELPME_PI / (kappa * kappa);
        Real volPrefac =
            scaleFactor * pow(HELPME_PI, rPower - 1) / (HELPME_SQRTPI * gammaComputer<Real, rPower>::value * volume);
        size_t nxz = (size_t)myNx * myNz;
        Real Vxx = 0, Vxy = 0, Vyy = 0, Vxz = 0, Vyz = 0, Vzz = 0;
        const Real *boxPtr = boxInv[0];
        size_t nyxz = myNy * nxz;
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for reduction(+ : energy, Vxx, Vxy, Vyy, Vxz, Vyz, Vzz) num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into (ky, kx, kz) coordinates.
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / myNz;
            short kz = xz % myNz;
            // We only loop over the first nx/2+1 x values in the complex case;
            // this accounts for the "missing" complex conjugate values.
            Real permPrefac = (useConjugateSymmetry && (kx + startX != 0) && (kx + startX != fullNx - 1)) ? 2 : 1;
            const int &mx = xMVals[kx];
            const int &my = yMVals[ky];
            const int &mz = zMVals[kz];
            // Reciprocal space vector m = boxInv . (mx, my, mz), using the rows of boxInv.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            // eGamma enters the influence function; vGamma supplies the extra piece needed for the virial.
            auto gammas = incompleteGammaVirialComputer<Real, 3 - rPower>::compute(bSquared);
            Real eGamma = std::get<0>(gammas);
            Real vGamma = std::get<1>(gammas);
            Complex &gridVal = gridPtr[yxz];
            // |S(m)|^2, the structure factor norm for this reciprocal vector.
            Real structFacNorm = gridVal.real() * gridVal.real() + gridVal.imag() * gridVal.imag();
            Real totalPrefac = volPrefac * mTerm * yMods[ky] * xMods[kx] * zMods[kz];
            Real influenceFunction = totalPrefac * eGamma;
            // Convolve the grid in place, ready for the backtransform.
            gridVal *= influenceFunction;
            Real eTerm = permPrefac * influenceFunction * structFacNorm;
            Real vTerm = permPrefac * vGamma * totalPrefac / mNormSq * structFacNorm;
            energy += eTerm;
            Vxx += vTerm * mVecX * mVecX;
            Vxy += vTerm * mVecX * mVecY;
            Vyy += vTerm * mVecY * mVecY;
            Vxz += vTerm * mVecX * mVecZ;
            Vyz += vTerm * mVecY * mVecZ;
            Vzz += vTerm * mVecZ * mVecZ;
        }
        // Overall factor of 1/2 in the reciprocal space energy.
        energy /= 2;
        // Increment the six unique virial elements; the diagonal terms include the energy contribution.
        virial[0][0] -= Vxx - energy;
        virial[0][1] -= Vxy;
        virial[0][2] -= Vyy - energy;
        virial[0][3] -= Vxz;
        virial[0][4] -= Vyz;
        virial[0][5] -= Vzz - energy;
        return energy;
    }
    /*!
     * \brief convolveEVCompressedImpl performs the reciprocal space convolution, returning the energy, for compressed
     *        PME. We opt to not cache this the same way as the non-virial version because it's safe to assume that if
     *        the virial is requested the box is likely to change, which renders the cache useless.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param myNx the subset of the reciprocal sum in the x direction to be handled by this node.
     * \param myNy the subset of the reciprocal sum in the y direction to be handled by this node.
     * \param myNz the subset of the reciprocal sum in the z direction to be handled by this node.
     * \param startX the starting reciprocal sum term handled by this node in the X direction.
     * \param startY the starting reciprocal sum term handled by this node in the Y direction.
     * \param startZ the starting reciprocal sum term handled by this node in the Z direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof
     *        (e.g. the 1 / [4 pi epsilon0] for Coulomb calculations).
     * \param gridPtrIn the Fourier space grid, with ordering YXZ.
     * \param gridPtrOut the convolved Fourier space grid, with ordering YXZ.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param xMVals the integer prefactors to iterate over reciprocal vectors in the x dimension.
     * \param yMVals the integer prefactors to iterate over reciprocal vectors in the y dimension.
     * \param zMVals the integer prefactors to iterate over reciprocal vectors in the z dimension.
     * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
     *        This vector is incremented, not assigned.
     * \param nThreads the number of OpenMP threads to use.
     * \return the reciprocal space energy.
     */
    template <int rPower>
    static Real convolveEVCompressedImpl(int myNx, int myNy, int myNz, int startX, int startY, int startZ,
                                         Real scaleFactor, const Real *__restrict__ gridPtrIn,
                                         Real *__restrict__ gridPtrOut, const RealMat &boxInv, Real volume, Real kappa,
                                         const Real *xMods, const Real *yMods, const Real *zMods, const int *xMVals,
                                         const int *yMVals, const int *zMVals, RealMat &virial, int nThreads) {
        Real energy = 0;
        bool nodeZero = startX == 0 && startY == 0 && startZ == 0;
        if (rPower > 3 && nodeZero) {
            // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real prefac = 2 * scaleFactor * HELPME_PI * HELPME_SQRTPI * pow(kappa, rPower - 3) /
                          ((rPower - 3) * gammaComputer<Real, rPower>::value * volume);
            energy += prefac * gridPtrIn[0] * gridPtrIn[0];
        }
        // Ensure the m=0 term convolution product is zeroed for the backtransform; it's been accounted for above.
        if (nodeZero) gridPtrOut[0] = 0;
        Real bPrefac = HELPME_PI * HELPME_PI / (kappa * kappa);
        Real volPrefac =
            scaleFactor * pow(HELPME_PI, rPower - 1) / (HELPME_SQRTPI * gammaComputer<Real, rPower>::value * volume);
        size_t nxz = (size_t)myNx * myNz;
        size_t nyxz = myNy * nxz;
        Real Vxx = 0, Vxy = 0, Vyy = 0, Vxz = 0, Vyz = 0, Vzz = 0;
        const Real *boxPtr = boxInv[0];
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for reduction(+ : energy, Vxx, Vxy, Vyy, Vxz, Vyz, Vzz) num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into (ky, kx, kz) coordinates.
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / myNz;
            short kz = xz % myNz;
            // We only loop over the first nx/2+1 x values in the complex case;
            // this accounts for the "missing" complex conjugate values.
            const int &mx = xMVals[kx];
            const int &my = yMVals[ky];
            const int &mz = zMVals[kz];
            // Reciprocal space vector m = boxInv . (mx, my, mz), using the rows of boxInv.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            // eGamma enters the influence function; vGamma supplies the extra piece needed for the virial.
            auto gammas = incompleteGammaVirialComputer<Real, 3 - rPower>::compute(bSquared);
            Real eGamma = std::get<0>(gammas);
            Real vGamma = std::get<1>(gammas);
            const Real &gridVal = gridPtrIn[yxz];
            // Locate the partner term with the opposite sign of each m component. The m values are
            // ordered 0, 1, -1, 2, -2, ... (see the compressed-PME setup), so +m and -m sit in
            // adjacent slots; m=0 is its own partner.
            size_t minusKx = (mx == 0 ? 0 : (mx < 0 ? kx - 1 : kx + 1));
            size_t minusKy = (my == 0 ? 0 : (my < 0 ? ky - 1 : ky + 1));
            size_t minusKz = (mz == 0 ? 0 : (mz < 0 ? kz - 1 : kz + 1));
            size_t addressXY = minusKy * nxz + minusKx * myNz + kz;
            size_t addressXZ = ky * nxz + minusKx * myNz + minusKz;
            size_t addressYZ = minusKy * nxz + (size_t)kx * myNz + minusKz;
            Real totalPrefac = volPrefac * mTerm * yMods[ky] * xMods[kx] * zMods[kz];
            Real influenceFunction = totalPrefac * eGamma;
            // Convolve into the output grid, ready for the backtransform.
            gridPtrOut[yxz] = gridVal * influenceFunction;
            Real eTerm = influenceFunction * gridVal * gridVal;
            Real vPrefac = vGamma * totalPrefac / mNormSq * gridVal;
            Real vTerm = vPrefac * gridVal;
            // Off-diagonal virial terms couple this term with its -m partners.
            Real vTermXY = vPrefac * gridPtrIn[addressXY];
            Real vTermXZ = vPrefac * gridPtrIn[addressXZ];
            Real vTermYZ = vPrefac * gridPtrIn[addressYZ];
            energy += eTerm;
            Vxx += vTerm * mVecX * mVecX;
            Vxy -= vTermXY * mVecX * mVecY;
            Vyy += vTerm * mVecY * mVecY;
            Vxz -= vTermXZ * mVecX * mVecZ;
            Vyz -= vTermYZ * mVecY * mVecZ;
            Vzz += vTerm * mVecZ * mVecZ;
        }
        // Overall factor of 1/2 in the reciprocal space energy.
        energy /= 2;
        // Increment the six unique virial elements; the diagonal terms include the energy contribution.
        virial[0][0] -= Vxx - energy;
        virial[0][1] -= Vxy;
        virial[0][2] -= Vyy - energy;
        virial[0][3] -= Vxz;
        virial[0][4] -= Vyz;
        virial[0][5] -= Vzz - energy;
        return energy;
    }
/*!
* \brief checkMinimumImageCutoff ensure that the box dimensions satisfy the condition
* sphericalCutoff < MIN(W_A, W_B, W_C)/2
*
* where
*
* W_A = |A.(B x C)| / |B x C|
* W_B = |B.(C x A)| / |C x A|
* W_C = |C.(A x B)| / |A x B|
*
* \param sphericalCutoff the spherical nonbonded cutoff in Angstrom
*/
void checkMinimumImageCutoff(int sphericalCutoff) {
Real V = cellVolume();
Real ABx = boxVecs_(0, 1) * boxVecs_(1, 2) - boxVecs_(0, 2) * boxVecs_(1, 1);
Real ABy = boxVecs_(0, 0) * boxVecs_(1, 2) - boxVecs_(0, 2) * boxVecs_(1, 0);
Real ABz = boxVecs_(0, 0) * boxVecs_(1, 1) - boxVecs_(0, 1) * boxVecs_(1, 0);
Real ACx = boxVecs_(0, 1) * boxVecs_(2, 2) - boxVecs_(0, 2) * boxVecs_(2, 1);
Real ACy = boxVecs_(0, 0) * boxVecs_(2, 2) - boxVecs_(0, 2) * boxVecs_(2, 0);
Real ACz = boxVecs_(0, 0) * boxVecs_(2, 1) - boxVecs_(0, 1) * boxVecs_(2, 0);
Real BCx = boxVecs_(1, 1) * boxVecs_(2, 2) - boxVecs_(1, 2) * boxVecs_(2, 1);
Real BCy = boxVecs_(1, 0) * boxVecs_(2, 2) - boxVecs_(1, 2) * boxVecs_(2, 0);
Real BCz = boxVecs_(1, 0) * boxVecs_(2, 1) - boxVecs_(1, 1) * boxVecs_(2, 0);
Real AxBnorm = std::sqrt(ABx * ABx + ABy * ABy + ABz * ABz);
Real AxCnorm = std::sqrt(ACx * ACx + ACy * ACy + ACz * ACz);
Real BxCnorm = std::sqrt(BCx * BCx + BCy * BCy + BCz * BCz);
Real minDim = 2 * sphericalCutoff;
if (V / AxBnorm < minDim || V / AxCnorm < minDim || V / BxCnorm < minDim)
throw std::runtime_error("The cutoff used must be less than half of the minimum of three box widths");
}
/*!
* \brief sanityChecks just makes sure that inputs have consistent dimensions, and that prerequisites are
* initialized.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.).
* \param parameters the input parameters.
* \param coordinates the input coordinates.
* \param cartesianOffset an offset to the start of the angular momentum shell for the parameters, in cases where
* only a single angular momentum shell is to be processed (rather than all shells up to a given angular momentum).
*/
void sanityChecks(int parameterAngMom, const RealMat ¶meters, const RealMat &coordinates,
int cartesianOffset = 0) {
assertInitialized();
if (parameterAngMom < 0)
throw std::runtime_error("Negative parameter angular momentum found where positive value was expected");
if (boxVecs_.isNearZero())
throw std::runtime_error(
"Lattice vectors have not been set yet! Call setLatticeVectors(...) before runPME(...);");
if (coordinates.nRows() != parameters.nRows())
throw std::runtime_error(
"Inconsistent number of coordinates and parameters; there should be nAtoms of each.");
if (parameters.nCols() != (nCartesian(parameterAngMom) - cartesianOffset))
throw std::runtime_error(
"Mismatch in the number of parameters provided and the parameter angular momentum");
}
    /*!
     * \brief cacheInfluenceFunctionImpl computes the influence function used in convolution, for later use.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param myNx the subset of the grid in the x direction to be handled by this node.
     * \param myNy the subset of the grid in the y direction to be handled by this node.
     * \param myNz the subset of the grid in the z direction to be handled by this node.
     * \param startX the starting reciprocal space sum term handled by this node in the X direction.
     * \param startY the starting reciprocal space sum term handled by this node in the Y direction.
     * \param startZ the starting reciprocal space sum term handled by this node in the Z direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     *        1 / [4 pi epsilon0] for Coulomb calculations).
     * \param influenceFunction the vector that receives the influence function; it is resized to hold one
     *        value per local reciprocal sum term (ordering YXZ) and overwritten.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param xMVals the integer prefactors to iterate over reciprocal vectors in the x dimension.
     * \param yMVals the integer prefactors to iterate over reciprocal vectors in the y dimension.
     * \param zMVals the integer prefactors to iterate over reciprocal vectors in the z dimension.
     * \param nThreads the number of OpenMP threads to use.
     */
    template <int rPower>
    static void cacheInfluenceFunctionImpl(int myNx, int myNy, int myNz, int startX, int startY, int startZ,
                                           Real scaleFactor, RealVec &influenceFunction, const RealMat &boxInv,
                                           Real volume, Real kappa, const Real *xMods, const Real *yMods,
                                           const Real *zMods, const int *xMVals, const int *yMVals, const int *zMVals,
                                           int nThreads) {
        bool nodeZero = startX == 0 && startY == 0 && startZ == 0;
        size_t nxz = (size_t)myNx * myNz;
        size_t nyxz = myNy * nxz;
        influenceFunction.resize(nyxz);
        Real *gridPtr = influenceFunction.data();
        // The m=0 term is handled separately elsewhere, so its influence function entry is zero.
        if (nodeZero) gridPtr[0] = 0;
        Real bPrefac = HELPME_PI * HELPME_PI / (kappa * kappa);
        Real volPrefac =
            scaleFactor * pow(HELPME_PI, rPower - 1) / (HELPME_SQRTPI * gammaComputer<Real, rPower>::value * volume);
        const Real *boxPtr = boxInv[0];
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into (ky, kx, kz) coordinates.
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / myNz;
            short kz = xz % myNz;
            const Real mx = (Real)xMVals[kx];
            const Real my = (Real)yMVals[ky];
            const Real mz = (Real)zMVals[kz];
            // Reciprocal space vector m = boxInv . (mx, my, mz), using the rows of boxInv.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            Real incompleteGammaTerm = incompleteGammaComputer<Real, 3 - rPower>::compute(bSquared);
            gridPtr[yxz] = volPrefac * incompleteGammaTerm * mTerm * yMods[ky] * xMods[kx] * zMods[kz];
        }
    }
/*!
* \brief dirEImpl computes the kernel for the direct energy for a pair.
* \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
* \param rSquared the square of the internuclear distance
* \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
* \return the energy kernel.
*/
template <int rPower>
inline static Real dirEImpl(Real rSquared, Real kappaSquared) {
Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
gammaComputer<Real, rPower>::value;
return gammaTerm / denominator;
}
/*!
* \brief dirEFImpl computes the kernels for the direct energy and force for a pair.
* \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
* \param rSquared the square of the internuclear distance
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
* \return a tuple containing the energy and force kernels, respectively.
*/
template <int rPower>
inline static std::tuple<Real, Real> dirEFImpl(Real rSquared, Real kappa, Real kappaSquared) {
Real rInv = 1 / rSquared;
Real kappaToRPower = kappa;
for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa;
Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
gammaComputer<Real, rPower>::value;
Real eKernel = gammaTerm / denominator;
Real fKernel = -rPower * eKernel * rInv -
2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value;
return std::make_tuple(eKernel, fKernel);
}
/*!
* \brief adjEImpl computes the kernel for the adjusted energy for a pair.
* \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
* \param rSquared the square of the internuclear distance
* \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
* \return the energy kernel.
*/
template <int rPower>
inline static Real adjEImpl(Real rSquared, Real kappaSquared) {
Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
gammaComputer<Real, rPower>::value;
return (gammaTerm - 1) / denominator;
}
/*!
* \brief adjEFImpl computes the kernels for the adjusted energy and force for a pair.
* \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
* \param rSquared the square of the internuclear distance
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
* \return a tuple containing the energy and force kernels, respectively.
*/
template <int rPower>
inline static std::tuple<Real, Real> adjEFImpl(Real rSquared, Real kappa, Real kappaSquared) {
Real rInv = 1 / rSquared;
Real kappaToRPower = kappa;
for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa;
Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
gammaComputer<Real, rPower>::value;
Real eKernel = (gammaTerm - 1) / denominator;
Real fKernel = -rPower * eKernel * rInv -
2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value;
return std::make_tuple(eKernel, fKernel);
}
/*!
* \brief slfEImpl computes the coefficient to be applied to the sum of squared parameters for the self energy
* due to particles feeling their own potential.
* \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles,
* etc.).
* \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
* etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL =
* (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof
* (e.g. the 1 / [4 pi epslion0] for Coulomb calculations).
* \return the coefficient for the sum of squared parameters in the self energy. N.B. there is no self force
* associated with this term.
*/
template <int rPower>
static Real slfEImpl(int parameterAngMom, Real kappa, Real scaleFactor) {
if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet.");
return -scaleFactor * std::pow(kappa, rPower) / (rPower * gammaComputer<Real, rPower>::value);
}
/*!
* \brief common_init sets up information that is common to serial and parallel runs.
*/
void setupCalculationMetadata(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, int maxKA,
int maxKB, int maxKC, Real scaleFactor, int nThreads, void *commPtrIn,
NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC) {
int numKSumTermsA = std::min(2 * maxKA + 1, dimA);
int numKSumTermsB = std::min(2 * maxKB + 1, dimB);
int numKSumTermsC = std::min(2 * maxKC + 1, dimC);
AlgorithmType algorithm = numKSumTermsA < dimA && numKSumTermsB < dimB && numKSumTermsC < dimC
? AlgorithmType::CompressedPME
: AlgorithmType::PME;
kappaHasChanged_ = kappa != kappa_;
numNodesHasChanged_ = numNodesA_ != numNodesA || numNodesB_ != numNodesB || numNodesC_ != numNodesC;
rPowerHasChanged_ = rPower_ != rPower;
gridDimensionHasChanged_ = gridDimensionA_ != dimA || gridDimensionB_ != dimB || gridDimensionC_ != dimC;
reciprocalSumDimensionHasChanged_ =
numKSumTermsA != numKSumTermsA_ || numKSumTermsB != numKSumTermsB_ || numKSumTermsC != numKSumTermsC_;
algorithmHasChanged_ = algorithmType_ != algorithm;
splineOrderHasChanged_ = splineOrder_ != splineOrder;
scaleFactorHasChanged_ = scaleFactor_ != scaleFactor;
if (kappaHasChanged_ || rPowerHasChanged_ || gridDimensionHasChanged_ || splineOrderHasChanged_ ||
numNodesHasChanged_ || scaleFactorHasChanged_ || algorithmHasChanged_ ||
requestedNumberOfThreads_ != nThreads) {
numNodesA_ = numNodesA;
numNodesB_ = numNodesB;
numNodesC_ = numNodesC;
myNodeRankA_ = myNodeRankB_ = myNodeRankC_ = 0;
#if HAVE_MPI == 1
if (commPtrIn) {
MPI_Comm const &communicator = *((MPI_Comm *)(commPtrIn));
mpiCommunicator_ = std::unique_ptr<MPIWrapper<Real>>(
new MPIWrapper<Real>(communicator, numNodesA, numNodesB, numNodesC));
switch (nodeOrder) {
case (NodeOrder::ZYX):
myNodeRankA_ = mpiCommunicator_->myRank_ % numNodesA;
myNodeRankB_ = (mpiCommunicator_->myRank_ % (numNodesB * numNodesA)) / numNodesA;
myNodeRankC_ = mpiCommunicator_->myRank_ / (numNodesB * numNodesA);
mpiCommunicatorA_ =
mpiCommunicator_->split(myNodeRankC_ * numNodesB + myNodeRankB_, myNodeRankA_);
mpiCommunicatorB_ =
mpiCommunicator_->split(myNodeRankC_ * numNodesA + myNodeRankA_, myNodeRankB_);
mpiCommunicatorC_ =
mpiCommunicator_->split(myNodeRankB_ * numNodesA + myNodeRankA_, myNodeRankC_);
break;
default:
throw std::runtime_error("Unknown NodeOrder in helpme::setupCalculationMetadata.");
}
}
#else // Have MPI
if (numNodesA * numNodesB * numNodesC > 1)
throw std::runtime_error(
"a parallel calculation has been setup, but helpme was not compiled with MPI. Make sure you "
"compile with -DHAVE_MPI=1 "
"in the list of compiler definitions.");
#endif // Have MPI
rPower_ = rPower;
algorithmType_ = algorithm;
splineOrder_ = splineOrder;
cacheLineSizeInReals_ = static_cast<Real>(sysconf(_SC_PAGESIZE) / sizeof(Real));
requestedNumberOfThreads_ = nThreads;
#ifdef _OPENMP
nThreads_ = nThreads ? nThreads : omp_get_max_threads();
#else
nThreads_ = 1;
#endif
scaleFactor_ = scaleFactor;
kappa_ = kappa;
size_t scratchSize;
int gridPaddingA = 0, gridPaddingB = 0, gridPaddingC = 0;
if (algorithm == AlgorithmType::CompressedPME) {
gridDimensionA_ = numNodesA * std::ceil(dimA / (float)numNodesA);
gridDimensionB_ = numNodesB * std::ceil(dimB / (float)numNodesB);
gridDimensionC_ = numNodesC * std::ceil(dimC / (float)numNodesC);
gridPaddingA = (numNodesA > 1 ? splineOrder - 1 : 0);
gridPaddingB = (numNodesB > 1 ? splineOrder - 1 : 0);
gridPaddingC = (numNodesC > 1 ? splineOrder - 1 : 0);
myGridDimensionA_ = gridDimensionA_ / numNodesA + gridPaddingA;
myGridDimensionB_ = gridDimensionB_ / numNodesB + gridPaddingB;
myGridDimensionC_ = gridDimensionC_ / numNodesC + gridPaddingC;
myFirstGridPointA_ = myNodeRankA_ * (myGridDimensionA_ - gridPaddingA);
myFirstGridPointB_ = myNodeRankB_ * (myGridDimensionB_ - gridPaddingB);
myFirstGridPointC_ = myNodeRankC_ * (myGridDimensionC_ - gridPaddingC);
myNumKSumTermsA_ = numNodesA == 1 ? numKSumTermsA : 2 * std::ceil((maxKA + 1.0) / numNodesA);
myNumKSumTermsB_ = numNodesB == 1 ? numKSumTermsB : 2 * std::ceil((maxKB + 1.0) / numNodesB);
myNumKSumTermsC_ = numNodesC == 1 ? numKSumTermsC : 2 * std::ceil((maxKC + 1.0) / numNodesC);
numKSumTermsA_ = myNumKSumTermsA_ * numNodesA;
numKSumTermsB_ = myNumKSumTermsB_ * numNodesB;
numKSumTermsC_ = myNumKSumTermsC_ * numNodesC;
firstKSumTermA_ = myNodeRankA_ * myNumKSumTermsA_;
firstKSumTermB_ = myNodeRankB_ * myNumKSumTermsB_;
firstKSumTermC_ = myNodeRankC_ * myNumKSumTermsC_;
fftHelperA_ = std::move(FFTWWrapper<Real>());
fftHelperB_ = std::move(FFTWWrapper<Real>());
fftHelperC_ = std::move(FFTWWrapper<Real>());
compressionCoefficientsA_ = RealMat(numKSumTermsA_, myGridDimensionA_);
compressionCoefficientsB_ = RealMat(numKSumTermsB_, myGridDimensionB_);
compressionCoefficientsC_ = RealMat(numKSumTermsC_, myGridDimensionC_);
scratchSize = (size_t)std::max(myGridDimensionA_, numKSumTermsA) *
std::max(myGridDimensionB_, numKSumTermsB) * std::max(myGridDimensionC_, numKSumTermsC);
} else {
gridDimensionA_ = findGridSize(dimA, {numNodesA_});
gridDimensionB_ = findGridSize(dimB, {numNodesB_ * numNodesC_});
gridDimensionC_ = findGridSize(dimC, {numNodesA_ * numNodesC_, numNodesB_ * numNodesC_});
gridPaddingA = gridPaddingB = gridPaddingC = 0;
myGridDimensionA_ = gridDimensionA_ / numNodesA_;
myGridDimensionB_ = gridDimensionB_ / numNodesB_;
myGridDimensionC_ = gridDimensionC_ / numNodesC_;
complexGridDimensionA_ = gridDimensionA_ / 2 + 1;
myComplexGridDimensionA_ = myGridDimensionA_ / 2 + 1;
numKSumTermsA_ = gridDimensionA_;
numKSumTermsB_ = gridDimensionB_;
numKSumTermsC_ = gridDimensionC_;
myNumKSumTermsA_ = myComplexGridDimensionA_;
myNumKSumTermsB_ = myGridDimensionB_ / numNodesC_;
myNumKSumTermsC_ = gridDimensionC_;
myFirstGridPointA_ = myNodeRankA_ * myGridDimensionA_;
myFirstGridPointB_ = myNodeRankB_ * myGridDimensionB_;
myFirstGridPointC_ = myNodeRankC_ * myGridDimensionC_;
firstKSumTermA_ = myNodeRankA_ * myComplexGridDimensionA_;
firstKSumTermB_ = myNodeRankB_ * myGridDimensionB_ + myNodeRankC_ * myGridDimensionB_ / numNodesC_;
firstKSumTermC_ = 0;
fftHelperA_ = std::move(FFTWWrapper<Real>(gridDimensionA_));
fftHelperB_ = std::move(FFTWWrapper<Real>(gridDimensionB_));
fftHelperC_ = std::move(FFTWWrapper<Real>(gridDimensionC_));
compressionCoefficientsA_ = RealMat();
compressionCoefficientsB_ = RealMat();
compressionCoefficientsC_ = RealMat();
scratchSize = (size_t)myGridDimensionC_ * myComplexGridDimensionA_ * myGridDimensionB_;
}
// Grid iterators to correctly wrap the grid when using splines.
gridIteratorA_ = makeGridIterator(gridDimensionA_, myFirstGridPointA_,
myFirstGridPointA_ + myGridDimensionA_, gridPaddingA);
gridIteratorB_ = makeGridIterator(gridDimensionB_, myFirstGridPointB_,
myFirstGridPointB_ + myGridDimensionB_, gridPaddingB);
gridIteratorC_ = makeGridIterator(gridDimensionC_, myFirstGridPointC_,
myFirstGridPointC_ + myGridDimensionC_, gridPaddingC);
// Divide C grid points among threads to avoid race conditions.
threadedGridIteratorC_.clear();
for (int thread = 0; thread < nThreads_; ++thread) {
GridIterator myIterator;
for (int cGridPoint = 0; cGridPoint < gridDimensionC_; ++cGridPoint) {
std::vector<std::pair<short, short>> splineIterator;
for (const auto &fullIterator : gridIteratorC_[cGridPoint]) {
if (fullIterator.first % nThreads_ == thread) {
splineIterator.push_back(fullIterator);
}
}
splineIterator.shrink_to_fit();
myIterator.push_back(splineIterator);
}
myIterator.shrink_to_fit();
threadedGridIteratorC_.push_back(myIterator);
}
threadedGridIteratorC_.shrink_to_fit();
// Assign a large default so that uninitialized values end up generating zeros later on
mValsA_.resize(myNumKSumTermsA_, 99);
mValsB_.resize(myNumKSumTermsB_, 99);
mValsC_.resize(myNumKSumTermsC_, 99);
if (algorithm == AlgorithmType::CompressedPME) {
// For compressed PME we order the m values as 0, 1, -1, 2, -2, ..., Kmax, -Kmax
// because we need to guarantee that +/- m pairs live on the same node for the virial.
mValsA_[0] = 0;
int startA = myNodeRankA_ ? 0 : 1;
for (int k = startA; k < (myNumKSumTermsA_ + (numNodesA_ == 1)) / 2; ++k) {
int m = myNodeRankA_ * myNumKSumTermsA_ / 2 + k;
mValsA_[startA + 2 * (k - startA)] = m;
mValsA_[startA + 2 * (k - startA) + 1] = -m;
}
mValsB_[0] = 0;
int startB = myNodeRankB_ ? 0 : 1;
for (int k = startB; k < (myNumKSumTermsB_ + (numNodesB_ == 1)) / 2; ++k) {
int m = myNodeRankB_ * myNumKSumTermsB_ / 2 + k;
mValsB_[startB + 2 * (k - startB)] = m;
mValsB_[startB + 2 * (k - startB) + 1] = -m;
}
mValsC_[0] = 0;
int startC = myNodeRankC_ ? 0 : 1;
for (int k = startC; k < (myNumKSumTermsC_ + (numNodesC_ == 1)) / 2; ++k) {
int m = myNodeRankC_ * myNumKSumTermsC_ / 2 + k;
mValsC_[startC + 2 * (k - startC)] = m;
mValsC_[startC + 2 * (k - startC) + 1] = -m;
}
std::fill(compressionCoefficientsA_[0], compressionCoefficientsA_[1], 1);
for (int node = 0; node < numNodesA_; ++node) {
int offset = node ? 0 : 1;
for (int m = offset; m < (myNumKSumTermsA_ + (numNodesA_ == 1)) / 2; ++m) {
int fullM = m + node * myNumKSumTermsA_ / 2;
Real *rowPtr = compressionCoefficientsA_[offset + 2 * (fullM - offset)];
for (int n = 0; n < myGridDimensionA_; ++n) {
Real exponent = 2 * HELPME_PI * fullM * (n + myFirstGridPointA_) / gridDimensionA_;
rowPtr[n] = std::sqrt(2) * std::cos(exponent);
rowPtr[n + myGridDimensionA_] = std::sqrt(2) * std::sin(exponent);
}
}
}
std::fill(compressionCoefficientsB_[0], compressionCoefficientsB_[1], 1);
for (int node = 0; node < numNodesB_; ++node) {
int offset = node ? 0 : 1;
for (int m = offset; m < (myNumKSumTermsB_ + (numNodesB_ == 1)) / 2; ++m) {
int fullM = m + node * myNumKSumTermsB_ / 2;
Real *rowPtr = compressionCoefficientsB_[offset + 2 * (fullM - offset)];
for (int n = 0; n < myGridDimensionB_; ++n) {
Real exponent = 2 * HELPME_PI * fullM * (n + myFirstGridPointB_) / gridDimensionB_;
rowPtr[n] = std::sqrt(2) * std::cos(exponent);
rowPtr[n + myGridDimensionB_] = std::sqrt(2) * std::sin(exponent);
}
}
}
std::fill(compressionCoefficientsC_[0], compressionCoefficientsC_[1], 1);
for (int node = 0; node < numNodesC_; ++node) {
int offset = node ? 0 : 1;
for (int m = offset; m < (myNumKSumTermsC_ + (numNodesC_ == 1)) / 2; ++m) {
int fullM = m + node * myNumKSumTermsC_ / 2;
Real *rowPtr = compressionCoefficientsC_[offset + 2 * (fullM - offset)];
for (int n = 0; n < myGridDimensionC_; ++n) {
Real exponent = 2 * HELPME_PI * fullM * (n + myFirstGridPointC_) / gridDimensionC_;
rowPtr[n] = std::sqrt(2) * std::cos(exponent);
rowPtr[n + myGridDimensionC_] = std::sqrt(2) * std::sin(exponent);
}
}
}
// Fourier space spline norms.
Spline spline = Spline(0, 0, splineOrder_, 0);
splineModA_ = spline.invSplineModuli(gridDimensionA_, mValsA_);
splineModB_ = spline.invSplineModuli(gridDimensionB_, mValsB_);
splineModC_ = spline.invSplineModuli(gridDimensionC_, mValsC_);
} else {
// For conventional PME we order the m values as 0, 1, 2, 3, .., Kmax, -Kmax, -Kmax+1, .., -2, -1
// because this is consistent with the ordering of m values that emerge from the FFT.
for (int ka = 0; ka < myNumKSumTermsA_; ++ka) {
mValsA_[ka] = firstKSumTermA_ +
(ka + firstKSumTermA_ >= (gridDimensionA_ + 1) / 2 ? ka - gridDimensionA_ : ka);
}
for (int kb = 0; kb < myNumKSumTermsB_; ++kb) {
mValsB_[kb] = firstKSumTermB_ +
(kb + firstKSumTermB_ >= (gridDimensionB_ + 1) / 2 ? kb - gridDimensionB_ : kb);
}
for (int kc = 0; kc < myNumKSumTermsC_; ++kc) {
mValsC_[kc] = firstKSumTermC_ +
(kc + firstKSumTermC_ >= (gridDimensionC_ + 1) / 2 ? kc - gridDimensionC_ : kc);
}
// Fourier space spline norms.
Spline spline = Spline(0, 0, splineOrder_, 0);
auto fullSplineModA = spline.invSplineModuli(gridDimensionA_);
auto fullSplineModB = spline.invSplineModuli(gridDimensionB_);
auto fullSplineModC = spline.invSplineModuli(gridDimensionC_);
scaledRecVecs_ = recVecs_.clone();
scaledRecVecs_.row(0) *= gridDimensionA_;
scaledRecVecs_.row(1) *= gridDimensionB_;
scaledRecVecs_.row(2) *= gridDimensionC_;
splineModA_.resize(myNumKSumTermsA_);
splineModB_.resize(myNumKSumTermsB_);
splineModC_.resize(myNumKSumTermsC_);
std::copy(&fullSplineModA[firstKSumTermA_], &fullSplineModA[firstKSumTermA_ + myNumKSumTermsA_],
splineModA_.begin());
std::copy(&fullSplineModB[firstKSumTermB_], &fullSplineModB[firstKSumTermB_ + myNumKSumTermsB_],
splineModB_.begin());
std::copy(&fullSplineModC[firstKSumTermC_], &fullSplineModC[firstKSumTermC_ + myNumKSumTermsC_],
splineModC_.begin());
}
// Set up function pointers by instantiating the appropriate evaluation functions. We could add many more
// entries by default here, but don't right now to avoid code bloat. To add an extra rPower kernel is a
// trivial cut and paste exercise; just add a new line with the desired 1/R power as the macro's argument.
switch (rPower) {
ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(1);
ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(6);
default:
std::string msg("Bad rPower requested. To fix this, add the appropriate entry in");
msg += __FILE__;
msg += ", line number ";
msg += std::to_string(__LINE__ - 5);
throw std::runtime_error(msg.c_str());
break;
}
subsetOfCAlongA_ = myGridDimensionC_ / numNodesA_;
subsetOfCAlongB_ = myGridDimensionC_ / numNodesB_;
subsetOfBAlongC_ = myGridDimensionB_ / numNodesC_;
workSpace1_ = helpme::vector<Complex>(scratchSize);
workSpace2_ = helpme::vector<Complex>(scratchSize);
#if HAVE_MKL
mkl_set_num_threads(nThreads_);
#endif
}
}
public:
/*!
* \brief Spread the parameters onto the charge grid. Generally this shouldn't be called;
* use the various computeE() methods instead. This is the more efficient version that filters
* the atom list and uses pre-computed splines. Therefore, the splineCache_
* member must have been updated via a call to filterAtomsAndBuildSplineCache() first.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \return realGrid the array of discretized parameters (stored in CBA order).
*/
Real *spreadParameters(int parameterAngMom, const RealMat &parameters) {
    // The discretized parameter grid is built in workSpace1_; it is fully
    // overwritten here, so no prior contents survive this call.
    Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data());
    updateAngMomIterator(parameterAngMom);
    // We need to figure out whether the incoming parameters need to be transformed to scaled fractional
    // coordinates or not, which is only needed for angular momentum higher than zero.
    RealMat tempParams;
    if (parameterAngMom) {
        tempParams = cartesianTransform(parameterAngMom, false, scaledRecVecs_.transpose(), parameters);
    }
    // Reference either the transformed copy or the caller's matrix directly
    // (charges need no transformation, so we avoid the copy in that case).
    const auto &fractionalParameters = parameterAngMom ? tempParams : parameters;
    int nComponents = nCartesian(parameterAngMom);
    size_t numBA = (size_t)myGridDimensionB_ * myGridDimensionA_;
#pragma omp parallel num_threads(nThreads_)
    {
#ifdef _OPENMP
        int threadID = omp_get_thread_num();
#else
        int threadID = 0;
#endif
        // Zero the grid: each thread clears the C planes it owns (planes strided
        // by thread count), so no two threads touch the same memory.
        for (size_t row = threadID; row < myGridDimensionC_; row += nThreads_) {
            std::fill(&realGrid[row * numBA], &realGrid[(row + 1) * numBA], Real(0));
        }
        // Spread only the splines pre-assigned to this thread by
        // filterAtomsAndBuildSplineCache(); that partition is what makes the
        // accumulation below race-free without atomics.
        for (const auto &spline : splinesPerThread_[threadID]) {
            const auto &cacheEntry = splineCache_[spline];
            const int &atom = cacheEntry.absoluteAtomNumber;
            const auto &splineA = cacheEntry.aSpline;
            const auto &splineB = cacheEntry.bSpline;
            const auto &splineC = cacheEntry.cSpline;
            spreadParametersImpl(atom, realGrid, nComponents, splineA, splineB, splineC, fractionalParameters,
                                 threadID);
        }
    }
    return realGrid;
}
/*!
* \brief filterAtomsAndBuildSplineCache builds a list of BSplines for only the atoms to be handled by this node.
* \param splineDerivativeLevel the derivative level (parameter angular momentum + energy derivative level) of the
* BSplines.
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
*/
void filterAtomsAndBuildSplineCache(int splineDerivativeLevel, const RealMat &coords) {
    assertInitialized();
    // Small shift applied to fractional coordinates so that atoms sitting exactly
    // on a cell boundary are classified deterministically.
    constexpr float EPS = 1e-6;
    size_t nAtoms = coords.nRows();
    numAtomsPerThread_.resize(nThreads_);
    splinesPerThread_.resize(nThreads_);
    gridAtomList_.resize(gridDimensionC_);
    // Stage 1: classify atoms to their worker threads (keyed by the atom's starting
    // C grid row) and count how many atoms each thread owns, so the spline cache
    // can be sized up front. Splines themselves are constructed in stage 2.
#pragma omp parallel num_threads(nThreads_)
    {
#ifdef _OPENMP
        int threadID = omp_get_thread_num();
#else
        int threadID = 0;
#endif
        // Each thread clears only the C rows it owns (strided partition), so the
        // shared gridAtomList_ is reset without races.
        for (size_t row = threadID; row < gridDimensionC_; row += nThreads_) {
            gridAtomList_[row].clear();
        }
        auto &mySplineList = splinesPerThread_[threadID];
        mySplineList.clear();
        size_t myNumAtoms = 0;
        for (size_t atom = 0; atom < nAtoms; ++atom) {
            const Real *atomCoords = coords[atom];
            // Only the C fractional coordinate is needed to decide ownership.
            Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) +
                          atomCoords[2] * recVecs_(2, 2) - EPS;
            cCoord -= floor(cCoord);
            short cStartingGridPoint = gridDimensionC_ * cCoord;
            size_t thisAtomsThread = cStartingGridPoint % nThreads_;
            const auto &cGridIterator = gridIteratorC_[cStartingGridPoint];
            // An empty iterator means this node does not handle that grid row.
            if (cGridIterator.size() && thisAtomsThread == static_cast<size_t>(threadID)) {
                Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) +
                              atomCoords[2] * recVecs_(2, 0) - EPS;
                Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) +
                              atomCoords[2] * recVecs_(2, 1) - EPS;
                // Make sure the fractional coordinates fall in the range 0 <= s < 1
                aCoord -= floor(aCoord);
                bCoord -= floor(bCoord);
                short aStartingGridPoint = gridDimensionA_ * aCoord;
                short bStartingGridPoint = gridDimensionB_ * bCoord;
                const auto &aGridIterator = gridIteratorA_[aStartingGridPoint];
                const auto &bGridIterator = gridIteratorB_[bStartingGridPoint];
                // Linearized CBA starting grid point; used as the sort key so atoms
                // sharing a grid region are processed together (cache locality).
                uint32_t startingGridPoint = cStartingGridPoint * gridDimensionB_ * gridDimensionA_ +
                                             bStartingGridPoint * gridDimensionA_ + aStartingGridPoint;
                if (aGridIterator.size() && bGridIterator.size()) {
                    gridAtomList_[cStartingGridPoint].emplace(startingGridPoint, atom);
                    ++myNumAtoms;
                }
            }
        }
        numAtomsPerThread_[threadID] = myNumAtoms;
    }
    // We could intervene here and do some load balancing by inspecting the list. Currently
    // the lazy approach of just assuming that the atoms are evenly distributed along c is used.
    // N.B. the size_t init is essential: an int literal would make std::accumulate
    // sum in int, overflowing for very large atom counts.
    size_t numCacheEntries = std::accumulate(numAtomsPerThread_.begin(), numAtomsPerThread_.end(), size_t(0));
    // Now we know how many atoms we loop over the dense list, redefining nAtoms accordingly.
    // The first stage above is to get the number of atoms, so we can avoid calling push_back
    // and thus avoid the many memory allocations. If the cache is too small, grow it by a
    // certain scale factor to try and minimize allocations in a not-too-wasteful manner.
    if (splineCache_.size() < numCacheEntries) {
        size_t newSize = static_cast<size_t>(1.2 * numCacheEntries);
        for (size_t atom = splineCache_.size(); atom < newSize; ++atom)
            splineCache_.emplace_back(splineOrder_, splineDerivativeLevel);
    }
    // Prefix sums give each thread its starting slot in the dense spline cache.
    std::vector<size_t> threadOffset(nThreads_, 0);
    for (int thread = 1; thread < nThreads_; ++thread) {
        threadOffset[thread] = threadOffset[thread - 1] + numAtomsPerThread_[thread - 1];
    }
    // Stage 2: each thread walks the (sorted) atom lists for its C rows and builds
    // the B-splines into its contiguous region of splineCache_.
#pragma omp parallel num_threads(nThreads_)
    {
#ifdef _OPENMP
        int threadID = omp_get_thread_num();
#else
        int threadID = 0;
#endif
        size_t entry = threadOffset[threadID];
        for (size_t cRow = threadID; cRow < gridDimensionC_; cRow += nThreads_) {
            for (const auto &gridPointAndAtom : gridAtomList_[cRow]) {
                size_t atom = gridPointAndAtom.second;
                const Real *atomCoords = coords[atom];
                Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) +
                              atomCoords[2] * recVecs_(2, 0) - EPS;
                Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) +
                              atomCoords[2] * recVecs_(2, 1) - EPS;
                Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) +
                              atomCoords[2] * recVecs_(2, 2) - EPS;
                // Make sure the fractional coordinates fall in the range 0 <= s < 1
                aCoord -= floor(aCoord);
                bCoord -= floor(bCoord);
                cCoord -= floor(cCoord);
                short aStartingGridPoint = gridDimensionA_ * aCoord;
                short bStartingGridPoint = gridDimensionB_ * bCoord;
                short cStartingGridPoint = gridDimensionC_ * cCoord;
                auto &atomSplines = splineCache_[entry++];
                atomSplines.absoluteAtomNumber = atom;
                // The fractional remainder within the starting grid cell seeds the spline.
                atomSplines.aSpline.update(aStartingGridPoint, gridDimensionA_ * aCoord - aStartingGridPoint,
                                           splineOrder_, splineDerivativeLevel);
                atomSplines.bSpline.update(bStartingGridPoint, gridDimensionB_ * bCoord - bStartingGridPoint,
                                           splineOrder_, splineDerivativeLevel);
                atomSplines.cSpline.update(cStartingGridPoint, gridDimensionC_ * cCoord - cStartingGridPoint,
                                           splineOrder_, splineDerivativeLevel);
            }
        }
    }
    // Stage 3: find all of the splines that each thread will need to handle when
    // spreading/probing, based on the thread-partitioned C grid iterator.
#pragma omp parallel num_threads(nThreads_)
    {
#ifdef _OPENMP
        int threadID = omp_get_thread_num();
#else
        int threadID = 0;
#endif
        auto &mySplineList = splinesPerThread_[threadID];
        mySplineList.clear();
        const auto &gridIteratorC = threadedGridIteratorC_[threadID];
        size_t count = 0;
        for (size_t atom = 0; atom < numCacheEntries; ++atom) {
            if (gridIteratorC[splineCache_[atom].cSpline.startingGridPoint()].size()) {
                mySplineList.emplace_back(count);
            }
            ++count;
        }
    }
}
/*!
* \brief cellVolume Compute the volume of the unit cell.
* \return volume in units consistent with those used to define the lattice vectors.
*/
Real cellVolume() {
    // The cell volume is the determinant of the 3x3 lattice-vector matrix,
    // accumulated term by term in the same order as a row-expansion.
    Real volume = boxVecs_(0, 0) * boxVecs_(1, 1) * boxVecs_(2, 2);
    volume -= boxVecs_(0, 0) * boxVecs_(1, 2) * boxVecs_(2, 1);
    volume += boxVecs_(0, 1) * boxVecs_(1, 2) * boxVecs_(2, 0);
    volume -= boxVecs_(0, 1) * boxVecs_(1, 0) * boxVecs_(2, 2);
    volume += boxVecs_(0, 2) * boxVecs_(1, 0) * boxVecs_(2, 1);
    volume -= boxVecs_(0, 2) * boxVecs_(1, 1) * boxVecs_(2, 0);
    return volume;
}
/*!
* \brief minimumImageDeltaR Computes deltaR = positionJ - positionI, applying the minimum image convention to the
* result \param positionI \param positionJ \return minimum image deltaR
*/
std::array<Real, 3> minimumImageDeltaR(const typename helpme::Matrix<Real>::sliceIterator &positionI,
                                       const typename helpme::Matrix<Real>::sliceIterator &positionJ) {
    // General triclinic-safe minimum image; an orthorhombic specialization would
    // be cheaper, but we keep the fully general path here.
    // Real-space (R) displacement J - I.
    const Real rx = positionJ[0] - positionI[0];
    const Real ry = positionJ[1] - positionI[1];
    const Real rz = positionJ[2] - positionI[2];
    // Map into fractional-coordinate (S) space using the reciprocal lattice vectors.
    const Real sx = recVecs_[0][0] * rx + recVecs_[0][1] * ry + recVecs_[0][2] * rz;
    const Real sy = recVecs_[1][0] * rx + recVecs_[1][1] * ry + recVecs_[1][2] * rz;
    const Real sz = recVecs_[2][0] * rx + recVecs_[2][1] * ry + recVecs_[2][2] * rz;
    // Nearest-integer lattice translation in fractional space.
    const Real tx = std::floor(sx + 0.5f);
    const Real ty = std::floor(sy + 0.5f);
    const Real tz = std::floor(sz + 0.5f);
    // Convert the translation back to real space via the lattice vectors.
    const Real shiftX = boxVecs_[0][0] * tx + boxVecs_[0][1] * ty + boxVecs_[0][2] * tz;
    const Real shiftY = boxVecs_[1][0] * tx + boxVecs_[1][1] * ty + boxVecs_[1][2] * tz;
    const Real shiftZ = boxVecs_[2][0] * tx + boxVecs_[2][1] * ty + boxVecs_[2][2] * tz;
    // Remove the shift to obtain the minimum-image displacement.
    return {rx - shiftX, ry - shiftY, rz - shiftZ};
}
/*!
* \brief Sets the unit cell lattice vectors, with units consistent with those used to specify coordinates.
* \param A the A lattice parameter in units consistent with the coordinates.
* \param B the B lattice parameter in units consistent with the coordinates.
* \param C the C lattice parameter in units consistent with the coordinates.
* \param alpha the alpha lattice parameter in degrees.
* \param beta the beta lattice parameter in degrees.
* \param gamma the gamma lattice parameter in degrees.
* \param latticeType how to arrange the lattice vectors. Options are
* ShapeMatrix: enforce a symmetric representation of the lattice vectors [c.f. S. Nosé and M. L. Klein,
* Mol. Phys. 50 1055 (1983)] particularly appendix C.
* XAligned: make the A vector coincide with the X axis, the B vector fall in the XY plane, and the C vector
* take the appropriate alignment to completely define the system.
*/
void setLatticeVectors(Real A, Real B, Real C, Real alpha, Real beta, Real gamma, LatticeType latticeType) {
    // Only rebuild the lattice when something actually changed; downstream code
    // inspects unitCellHasChanged_ to decide whether to refresh derived data.
    if (A != cellA_ || B != cellB_ || C != cellC_ || alpha != cellAlpha_ || beta != cellBeta_ ||
        gamma != cellGamma_ || latticeType != latticeType_) {
        if (latticeType == LatticeType::ShapeMatrix) {
            // Build H^T H from the cell parameters and take its unique symmetric
            // positive square root via diagonalization, giving the symmetric
            // lattice representation of Nosé & Klein (appendix C).
            RealMat HtH(3, 3);
            HtH(0, 0) = A * A;
            HtH(1, 1) = B * B;
            HtH(2, 2) = C * C;
            const float TOL = 1e-4f;
            // Check for angles very close to 90, to avoid noise from the eigensolver later on.
            HtH(0, 1) = HtH(1, 0) = std::abs(gamma - 90) < TOL ? 0 : A * B * std::cos(HELPME_PI * gamma / 180);
            HtH(0, 2) = HtH(2, 0) = std::abs(beta - 90) < TOL ? 0 : A * C * std::cos(HELPME_PI * beta / 180);
            HtH(1, 2) = HtH(2, 1) = std::abs(alpha - 90) < TOL ? 0 : B * C * std::cos(HELPME_PI * alpha / 180);
            auto eigenTuple = HtH.diagonalize();
            RealMat evalsReal = std::get<0>(eigenTuple);
            RealMat evecs = std::get<1>(eigenTuple);
            for (int i = 0; i < 3; ++i) evalsReal(i, 0) = std::sqrt(evalsReal(i, 0));
            boxVecs_.setZero();
            // boxVecs = V sqrt(D) V^T.
            for (int i = 0; i < 3; ++i) {
                for (int j = 0; j < 3; ++j) {
                    for (int k = 0; k < 3; ++k) {
                        boxVecs_(i, j) += evecs(i, k) * evecs(j, k) * evalsReal(k, 0);
                    }
                }
            }
            // recVecs_ is computed once for both branches, below.
        } else if (latticeType == LatticeType::XAligned) {
            // Conventional construction: A along X, B in the XY plane, C completing
            // the cell.
            boxVecs_(0, 0) = A;
            boxVecs_(0, 1) = 0;
            boxVecs_(0, 2) = 0;
            boxVecs_(1, 0) = B * std::cos(HELPME_PI / 180 * gamma);
            boxVecs_(1, 1) = B * std::sin(HELPME_PI / 180 * gamma);
            boxVecs_(1, 2) = 0;
            boxVecs_(2, 0) = C * std::cos(HELPME_PI / 180 * beta);
            boxVecs_(2, 1) =
                (B * C * std::cos(HELPME_PI / 180 * alpha) - boxVecs_(2, 0) * boxVecs_(1, 0)) / boxVecs_(1, 1);
            boxVecs_(2, 2) = std::sqrt(C * C - boxVecs_(2, 0) * boxVecs_(2, 0) - boxVecs_(2, 1) * boxVecs_(2, 1));
        } else {
            throw std::runtime_error("Unknown lattice type in setLatticeVectors");
        }
        recVecs_ = boxVecs_.inverse();
        // Pre-scale the reciprocal vectors by the grid dimensions; these are used
        // to map coordinates directly onto grid units.
        scaledRecVecs_ = recVecs_.clone();
        scaledRecVecs_.row(0) *= gridDimensionA_;
        scaledRecVecs_.row(1) *= gridDimensionB_;
        scaledRecVecs_.row(2) *= gridDimensionC_;
        cellA_ = A;
        cellB_ = B;
        cellC_ = C;
        cellAlpha_ = alpha;
        cellBeta_ = beta;
        cellGamma_ = gamma;
        latticeType_ = latticeType;
        unitCellHasChanged_ = true;
    } else {
        unitCellHasChanged_ = false;
    }
}
/*!
* \brief Performs the forward 3D FFT of the discretized parameter grid using the compressed PME algorithm.
* \param realGrid the array of discretized parameters (stored in CBA order,
* with A being the fast running index) to be transformed.
* \return Pointer to the transformed grid, which is stored in one of the buffers in BAC order.
*/
Real *compressedForwardTransform(Real *realGrid) {
    Real *__restrict__ buffer1, *__restrict__ buffer2;
    // Ping-pong between the two workspaces; pick the assignment that avoids
    // clobbering the incoming grid before it has been consumed.
    if (realGrid == reinterpret_cast<Real *>(workSpace1_.data())) {
        buffer1 = reinterpret_cast<Real *>(workSpace2_.data());
        buffer2 = reinterpret_cast<Real *>(workSpace1_.data());
    } else {
        buffer1 = reinterpret_cast<Real *>(workSpace1_.data());
        buffer2 = reinterpret_cast<Real *>(workSpace2_.data());
    }
    // Each dimension is transformed by contracting the grid with that dimension's
    // compression (cos/sin) coefficient matrix, with a permutation in between so
    // the next dimension to transform is the fast-running index.
    // Transform A index
    contractABxCWithDxC<Real>(realGrid, compressionCoefficientsA_[0], myGridDimensionC_ * myGridDimensionB_,
                              myGridDimensionA_, numKSumTermsA_, buffer1);
    // Sort CBA->CAB
    permuteABCtoACB(buffer1, myGridDimensionC_, myGridDimensionB_, numKSumTermsA_, buffer2, nThreads_);
    // Transform B index
    contractABxCWithDxC<Real>(buffer2, compressionCoefficientsB_[0], myGridDimensionC_ * numKSumTermsA_,
                              myGridDimensionB_, numKSumTermsB_, buffer1);
    // Sort CAB->BAC
    permuteABCtoCBA(buffer1, myGridDimensionC_, numKSumTermsA_, numKSumTermsB_, buffer2, nThreads_);
    // Transform C index
    contractABxCWithDxC<Real>(buffer2, compressionCoefficientsC_[0], numKSumTermsB_ * numKSumTermsA_,
                              myGridDimensionC_, numKSumTermsC_, buffer1);
#if HAVE_MPI == 1
    int numNodes = numNodesA_ * numNodesB_ * numNodesC_;
    if (numNodes > 1) {
        // Resort the data to be grouped by node, for communication
        for (int node = 0; node < numNodes; ++node) {
            // Decompose the flat node index into its (A, B, C) block offsets.
            int nodeStartA = myNumKSumTermsA_ * (node % numNodesA_);
            int nodeStartB = myNumKSumTermsB_ * ((node % (numNodesB_ * numNodesA_)) / numNodesA_);
            int nodeStartC = myNumKSumTermsC_ * (node / (numNodesB_ * numNodesA_));
            Real *outPtr = buffer2 + node * myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_;
            for (int B = 0; B < myNumKSumTermsB_; ++B) {
                const Real *inPtrB = buffer1 + (nodeStartB + B) * numKSumTermsA_ * numKSumTermsC_;
                for (int A = 0; A < myNumKSumTermsA_; ++A) {
                    const Real *inPtrBA = inPtrB + (nodeStartA + A) * numKSumTermsC_;
                    const Real *inPtrBAC = inPtrBA + nodeStartC;
                    std::copy(inPtrBAC, inPtrBAC + myNumKSumTermsC_, outPtr);
                    outPtr += myNumKSumTermsC_;
                }
            }
        }
        // Sum contributions from all nodes, leaving each node with only its own
        // block of k-sum terms.
        mpiCommunicator_->reduceScatterBlock(buffer2, buffer1,
                                             myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_);
    }
#endif
    return buffer1;
}
/*!
* \brief Performs the forward 3D FFT of the discretized parameter grid.
* \param realGrid the array of discretized parameters (stored in CBA order,
* with A being the fast running index) to be transformed.
* \return Pointer to the transformed grid, which is stored in one of the buffers in BAC order.
*/
Complex *forwardTransform(Real *realGrid) {
    Real *__restrict__ realCBA;
    Complex *__restrict__ buffer1, *__restrict__ buffer2;
    // Ping-pong between the two workspaces; pick the assignment that avoids
    // clobbering the incoming grid before it has been consumed.
    if (realGrid == reinterpret_cast<Real *>(workSpace1_.data())) {
        realCBA = reinterpret_cast<Real *>(workSpace2_.data());
        buffer1 = workSpace2_.data();
        buffer2 = workSpace1_.data();
    } else {
        realCBA = reinterpret_cast<Real *>(workSpace1_.data());
        buffer1 = workSpace1_.data();
        buffer2 = workSpace2_.data();
    }
#if HAVE_MPI == 1
    if (numNodesA_ > 1) {
        // Communicate A along columns
        mpiCommunicatorA_->allToAll(realGrid, realCBA, subsetOfCAlongA_ * myGridDimensionA_ * myGridDimensionB_);
        // Resort the data to end up with realGrid holding a full row of A data, for B pencil and C subset.
        for (int c = 0; c < subsetOfCAlongA_; ++c) {
            Real *outC = realGrid + c * myGridDimensionB_ * gridDimensionA_;
            for (int b = 0; b < myGridDimensionB_; ++b) {
                for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                    Real *inPtr = realCBA + (chunk * subsetOfCAlongA_ + c) * myGridDimensionB_ * myGridDimensionA_ +
                                  b * myGridDimensionA_;
                    std::copy(inPtr, inPtr + myGridDimensionA_,
                              outC + b * gridDimensionA_ + chunk * myGridDimensionA_);
                }
            }
        }
    }
#endif
    // Each parallel node allocates buffers of length dimA/(2 numNodesA)+1 for A, leading to a total of
    // dimA/2 + numNodesA = complexDimA+numNodesA-1 if dimA is even
    // and
    // numNodesA (dimA-1)/2 + numNodesA = complexDimA + numNodesA/2-1 if dimA is odd
    // We just allocate the larger size here, remembering that the final padding values on the last node
    // will all be allocated to zero and will not contribute to the final answer.
    const size_t scratchRowDim = complexGridDimensionA_ + numNodesA_ - 1;
    // Per-thread scratch rows for the real-to-complex A transform.
    helpme::vector<Complex> buffer(nThreads_ * scratchRowDim);
    // A transform, with instant sort to CAB ordering for each local block
#pragma omp parallel num_threads(nThreads_)
    {
#ifdef _OPENMP
        int threadID = omp_get_thread_num();
#else
        int threadID = 0;
#endif
        auto scratch = &buffer[threadID * scratchRowDim];
#pragma omp for
        for (int c = 0; c < subsetOfCAlongA_; ++c) {
            for (int b = 0; b < myGridDimensionB_; ++b) {
                Real *gridPtr = realGrid + c * myGridDimensionB_ * gridDimensionA_ + b * gridDimensionA_;
                fftHelperA_.transform(gridPtr, scratch);
                // Scatter the transformed row back into per-node chunks, already
                // transposed so B becomes the fast-running index (CAB order).
                for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                    for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                        buffer1[(chunk * subsetOfCAlongA_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                                a * myGridDimensionB_ + b] = scratch[chunk * myComplexGridDimensionA_ + a];
                    }
                }
            }
        }
    }
#if HAVE_MPI == 1
    // Communicate A back to blocks
    if (numNodesA_ > 1) {
        mpiCommunicatorA_->allToAll(buffer1, buffer2,
                                    subsetOfCAlongA_ * myComplexGridDimensionA_ * myGridDimensionB_);
        std::swap(buffer1, buffer2);
    }
    // Communicate B along rows
    if (numNodesB_ > 1) {
        mpiCommunicatorB_->allToAll(buffer1, buffer2,
                                    subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
        // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset.
        for (int c = 0; c < subsetOfCAlongB_; ++c) {
            Complex *cPtr = buffer1 + c * myComplexGridDimensionA_ * gridDimensionB_;
            for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                    Complex *inPtr = buffer2 +
                                     (chunk * subsetOfCAlongB_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                                     a * myGridDimensionB_;
                    std::copy(inPtr, inPtr + myGridDimensionB_,
                              cPtr + a * gridDimensionB_ + chunk * myGridDimensionB_);
                }
            }
        }
    }
#endif
    // B transform
    size_t numCA = (size_t)subsetOfCAlongB_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
    for (size_t ca = 0; ca < numCA; ++ca) {
        fftHelperB_.transform(buffer1 + ca * gridDimensionB_, FFTW_FORWARD);
    }
#if HAVE_MPI == 1
    if (numNodesB_ > 1) {
        // Inverse of the resort above: split the full B rows back into per-node chunks.
        for (int c = 0; c < subsetOfCAlongB_; ++c) {
            Complex *zPtr = buffer1 + c * myComplexGridDimensionA_ * gridDimensionB_;
            for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                    Complex *inPtr = zPtr + a * gridDimensionB_ + chunk * myGridDimensionB_;
                    Complex *outPtr =
                        buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                        a * myGridDimensionB_;
                    std::copy(inPtr, inPtr + myGridDimensionB_, outPtr);
                }
            }
        }
        // Communicate B back to blocks
        mpiCommunicatorB_->allToAll(buffer2, buffer1,
                                    subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
    }
#endif
    // sort local blocks from CAB to BAC order
    permuteABCtoCBA(buffer1, myGridDimensionC_, myComplexGridDimensionA_, myGridDimensionB_, buffer2, nThreads_);
#if HAVE_MPI == 1
    if (numNodesC_ > 1) {
        // Communicate C along columns
        mpiCommunicatorC_->allToAll(buffer2, buffer1,
                                    subsetOfBAlongC_ * myComplexGridDimensionA_ * myGridDimensionC_);
        // Stitch the per-node chunks into full C rows, ready for the C transform.
        for (int b = 0; b < subsetOfBAlongC_; ++b) {
            Complex *outPtrB = buffer2 + b * myComplexGridDimensionA_ * gridDimensionC_;
            for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                Complex *outPtrBA = outPtrB + a * gridDimensionC_;
                for (int chunk = 0; chunk < numNodesC_; ++chunk) {
                    Complex *inPtr = buffer1 +
                                     (chunk * subsetOfBAlongC_ + b) * myComplexGridDimensionA_ * myGridDimensionC_ +
                                     a * myGridDimensionC_;
                    std::copy(inPtr, inPtr + myGridDimensionC_, outPtrBA + chunk * myGridDimensionC_);
                }
            }
        }
    }
#endif
    // C transform
    size_t numBA = (size_t)subsetOfBAlongC_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
    for (size_t ba = 0; ba < numBA; ++ba) {
        fftHelperC_.transform(buffer2 + ba * gridDimensionC_, FFTW_FORWARD);
    }
    return buffer2;
}
/*!
* \brief Performs the inverse 3D FFT.
* \param convolvedGrid the complex array of discretized parameters convolved with the influence function
* (stored in BAC order, with C being the fast running index) to be transformed.
* \return Pointer to the potential grid, which is stored in one of the buffers in CBA order.
*/
Real *inverseTransform(Complex *convolvedGrid) {
    Complex *__restrict__ buffer1, *__restrict__ buffer2;
    // Setup scratch, taking care not to overwrite the convolved grid.
    if (convolvedGrid == workSpace1_.data()) {
        buffer1 = workSpace2_.data();
        buffer2 = workSpace1_.data();
    } else {
        buffer1 = workSpace1_.data();
        buffer2 = workSpace2_.data();
    }
    // This routine mirrors forwardTransform in reverse: C transform, then B,
    // then the complex-to-real A transform, with the inverse permutations and
    // MPI exchanges in between.
    // C transform
    size_t numYX = (size_t)subsetOfBAlongC_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
    for (size_t yx = 0; yx < numYX; ++yx) {
        fftHelperC_.transform(convolvedGrid + yx * gridDimensionC_, FFTW_BACKWARD);
    }
#if HAVE_MPI == 1
    if (numNodesC_ > 1) {
        // Communicate C back to blocks
        for (int b = 0; b < subsetOfBAlongC_; ++b) {
            Complex *inPtrB = convolvedGrid + b * myComplexGridDimensionA_ * gridDimensionC_;
            for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                Complex *inPtrBA = inPtrB + a * gridDimensionC_;
                for (int chunk = 0; chunk < numNodesC_; ++chunk) {
                    Complex *inPtrBAC = inPtrBA + chunk * myGridDimensionC_;
                    Complex *outPtr =
                        buffer1 + (chunk * subsetOfBAlongC_ + b) * myComplexGridDimensionA_ * myGridDimensionC_ +
                        a * myGridDimensionC_;
                    std::copy(inPtrBAC, inPtrBAC + myGridDimensionC_, outPtr);
                }
            }
        }
        mpiCommunicatorC_->allToAll(buffer1, buffer2,
                                    subsetOfBAlongC_ * myComplexGridDimensionA_ * myGridDimensionC_);
    }
#endif
    // sort local blocks from BAC to CAB order
    permuteABCtoCBA(buffer2, myGridDimensionB_, myComplexGridDimensionA_, myGridDimensionC_, buffer1, nThreads_);
#if HAVE_MPI == 1
    // Communicate B along rows
    if (numNodesB_ > 1) {
        mpiCommunicatorB_->allToAll(buffer1, buffer2,
                                    subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
        // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset.
        for (int c = 0; c < subsetOfCAlongB_; ++c) {
            Complex *cPtr = buffer1 + c * myComplexGridDimensionA_ * gridDimensionB_;
            for (int a = 0; a < myComplexGridDimensionA_; ++a) {
                for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                    Complex *inPtr = buffer2 +
                                     (chunk * subsetOfCAlongB_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                                     a * myGridDimensionB_;
                    std::copy(inPtr, inPtr + myGridDimensionB_,
                              cPtr + a * gridDimensionB_ + chunk * myGridDimensionB_);
                }
            }
        }
    }
#endif
    // B transform with instant sort of local blocks from CAB -> CBA order
    size_t numCA = (size_t)subsetOfCAlongB_ * myComplexGridDimensionA_;
#pragma omp parallel for num_threads(nThreads_)
    for (size_t ca = 0; ca < numCA; ++ca) {
        fftHelperB_.transform(buffer1 + ca * gridDimensionB_, FFTW_BACKWARD);
    }
    // Transpose the transformed data into per-node chunks with A as the fast index.
#pragma omp parallel for num_threads(nThreads_)
    for (int c = 0; c < subsetOfCAlongB_; ++c) {
        for (int a = 0; a < myComplexGridDimensionA_; ++a) {
            int cx = c * myComplexGridDimensionA_ * gridDimensionB_ + a * gridDimensionB_;
            for (int b = 0; b < myGridDimensionB_; ++b) {
                for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                    int cb = (chunk * subsetOfCAlongB_ + c) * myGridDimensionB_ * myComplexGridDimensionA_ +
                             b * myComplexGridDimensionA_;
                    buffer2[cb + a] = buffer1[cx + chunk * myGridDimensionB_ + b];
                }
            }
        }
    }
#if HAVE_MPI == 1
    // Communicate B back to blocks
    if (numNodesB_ > 1) {
        mpiCommunicatorB_->allToAll(buffer2, buffer1,
                                    subsetOfCAlongB_ * myComplexGridDimensionA_ * myGridDimensionB_);
    } else {
        // No exchange needed; just make buffer1 the current buffer.
        std::swap(buffer1, buffer2);
    }
    // Communicate A along rows
    if (numNodesA_ > 1) {
        mpiCommunicatorA_->allToAll(buffer1, buffer2,
                                    subsetOfCAlongA_ * myComplexGridDimensionA_ * myGridDimensionB_);
        // Resort the data to end up with the buffer holding a full row of A data, for B pencil and C subset.
        for (int c = 0; c < subsetOfCAlongA_; ++c) {
            Complex *cPtr = buffer1 + c * myGridDimensionB_ * complexGridDimensionA_;
            for (int b = 0; b < myGridDimensionB_; ++b) {
                for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                    Complex *inPtr = buffer2 +
                                     (chunk * subsetOfCAlongA_ + c) * myComplexGridDimensionA_ * myGridDimensionB_ +
                                     b * myComplexGridDimensionA_;
                    std::copy(inPtr, inPtr + myComplexGridDimensionA_,
                              cPtr + b * complexGridDimensionA_ + chunk * myComplexGridDimensionA_);
                }
            }
        }
    }
#else
    std::swap(buffer1, buffer2);
#endif
    // A transform (complex-to-real), writing the real-space grid into buffer2.
    Real *realGrid = reinterpret_cast<Real *>(buffer2);
#pragma omp parallel for num_threads(nThreads_)
    for (int cb = 0; cb < subsetOfCAlongA_ * myGridDimensionB_; ++cb) {
        fftHelperA_.transform(buffer1 + cb * complexGridDimensionA_, realGrid + cb * gridDimensionA_);
    }
#if HAVE_MPI == 1
    // Communicate A back to blocks
    if (numNodesA_ > 1) {
        Real *realGrid2 = reinterpret_cast<Real *>(buffer1);
        for (int c = 0; c < subsetOfCAlongA_; ++c) {
            Real *cPtr = realGrid + c * myGridDimensionB_ * gridDimensionA_;
            for (int b = 0; b < myGridDimensionB_; ++b) {
                for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                    Real *outPtr = realGrid2 +
                                   (chunk * subsetOfCAlongA_ + c) * myGridDimensionB_ * myGridDimensionA_ +
                                   b * myGridDimensionA_;
                    Real *inPtr = cPtr + b * gridDimensionA_ + chunk * myGridDimensionA_;
                    std::copy(inPtr, inPtr + myGridDimensionA_, outPtr);
                }
            }
        }
        mpiCommunicatorA_->allToAll(realGrid2, realGrid, subsetOfCAlongA_ * myGridDimensionB_ * myGridDimensionA_);
    }
#endif
    return realGrid;
}
/*!
 * \brief Performs the backward 3D FFT of the discretized parameter grid using the compressed PME algorithm.
 * \param reciprocalGrid the reciprocal space potential grid (stored in BAC order,
 *        with C being the fast running index) to be transformed.
 * \return Pointer to the transformed grid, which is stored in one of the buffers in CBA order.
 */
Real *compressedInverseTransform(Real *reciprocalGrid) {
    // Set up the two scratch buffers so that buffer2 aliases the input grid and buffer1 is free
    // workspace; the buffers are then ping-ponged between each contraction / permutation stage.
    Real *__restrict__ buffer1, *__restrict__ buffer2;
    if (reciprocalGrid == reinterpret_cast<Real *>(workSpace1_.data())) {
        buffer1 = reinterpret_cast<Real *>(workSpace2_.data());
        buffer2 = reinterpret_cast<Real *>(workSpace1_.data());
    } else {
        buffer1 = reinterpret_cast<Real *>(workSpace1_.data());
        buffer2 = reinterpret_cast<Real *>(workSpace2_.data());
    }
    // Make the reciprocal dimensions the fast running indices
    compressionCoefficientsA_.transposeInPlace();
    compressionCoefficientsB_.transposeInPlace();
    compressionCoefficientsC_.transposeInPlace();
#if HAVE_MPI == 1
    int numNodes = numNodesA_ * numNodesB_ * numNodesC_;
    if (numNodes > 1) {
        // Gather every node's local k-space block into buffer1 (buffer2 aliases the input grid).
        mpiCommunicator_->allGather(buffer2, buffer1, myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_);
        // Resort the data to be grouped by node, for communication
        for (int node = 0; node < numNodes; ++node) {
            // Node rank decomposes with A fastest, then B, then C; these are the global offsets
            // of this node's block within the full k-sum extents.
            int nodeStartA = myNumKSumTermsA_ * (node % numNodesA_);
            int nodeStartB = myNumKSumTermsB_ * ((node % (numNodesB_ * numNodesA_)) / numNodesA_);
            int nodeStartC = myNumKSumTermsC_ * (node / (numNodesB_ * numNodesA_));
            Real *inPtr = buffer1 + node * myNumKSumTermsA_ * myNumKSumTermsB_ * myNumKSumTermsC_;
            for (int B = 0; B < myNumKSumTermsB_; ++B) {
                Real *outPtrB = buffer2 + (nodeStartB + B) * numKSumTermsA_ * numKSumTermsC_;
                for (int A = 0; A < myNumKSumTermsA_; ++A) {
                    // Copy one contiguous run of C terms into its global (B,A,C) slot.
                    Real *outPtrBA = outPtrB + (nodeStartA + A) * numKSumTermsC_;
                    Real *outPtrBAC = outPtrBA + nodeStartC;
                    std::copy(inPtr, inPtr + myNumKSumTermsC_, outPtrBAC);
                    inPtr += myNumKSumTermsC_;
                }
            }
        }
    }
#endif
    // Transform C index
    contractABxCWithDxC<Real>(buffer2, compressionCoefficientsC_[0], numKSumTermsB_ * numKSumTermsA_,
                              numKSumTermsC_, myGridDimensionC_, buffer1);
    // Sort BAC->CAB
    permuteABCtoCBA(buffer1, numKSumTermsB_, numKSumTermsA_, myGridDimensionC_, buffer2, nThreads_);
    // Transform B index
    contractABxCWithDxC<Real>(buffer2, compressionCoefficientsB_[0], myGridDimensionC_ * numKSumTermsA_,
                              numKSumTermsB_, myGridDimensionB_, buffer1);
    // Sort CAB->CBA
    permuteABCtoACB(buffer1, myGridDimensionC_, numKSumTermsA_, myGridDimensionB_, buffer2, nThreads_);
    // Transform A index
    contractABxCWithDxC<Real>(buffer2, compressionCoefficientsA_[0], myGridDimensionC_ * myGridDimensionB_,
                              numKSumTermsA_, myGridDimensionA_, buffer1);
    // Make the grid dimensions the fast running indices again
    compressionCoefficientsA_.transposeInPlace();
    compressionCoefficientsB_.transposeInPlace();
    compressionCoefficientsC_.transposeInPlace();
    return buffer1;
}
/*!
 * \brief convolveE performs the convolution on a compressed PME transformed grid.
 * \param transformedGrid the pointer to the real array holding the transformed grid in YXZ ordering;
 *        it is scaled in place by the influence function.
 * \return the reciprocal space energy.
 */
Real convolveE(Real *transformedGrid) {
    updateInfluenceFunction();
    const size_t numACTerms = (size_t)myNumKSumTermsA_ * myNumKSumTermsC_;
    const size_t numTerms = myNumKSumTermsB_ * numACTerms;
    const bool isOriginNode = myNodeRankA_ == 0 && myNodeRankB_ == 0 && myNodeRankC_ == 0;
    Real *gridInfluence = cachedInfluenceFunction_.data();
    Real twiceEnergy = 0;
    // Only the node owning the origin handles the m=0 term.
    if (isOriginNode) {
        if (rPower_ > 3) {
            // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real zeroTermPrefac = 2 * scaleFactor_ * HELPME_PI * HELPME_SQRTPI * pow(kappa_, rPower_ - 3) /
                                  ((rPower_ - 3) * nonTemplateGammaComputer<Real>(rPower_) * cellVolume());
            twiceEnergy += zeroTermPrefac * transformedGrid[0] * transformedGrid[0];
        }
        transformedGrid[0] = 0;
    }
    // Collapsing the three nested loops into one gives better load balancing in parallel.
#pragma omp parallel for reduction(+ : twiceEnergy) num_threads(nThreads_)
    for (size_t term = 0; term < numTerms; ++term) {
        twiceEnergy += transformedGrid[term] * transformedGrid[term] * gridInfluence[term];
        transformedGrid[term] *= gridInfluence[term];
    }
    return twiceEnergy / 2;
}
/*!
 * \brief convolveE performs the convolution of a standard PME transformed grid.
 * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering;
 *        it is scaled in place by the influence function.
 * \return the reciprocal space energy.
 */
Real convolveE(Complex *transformedGrid) {
    updateInfluenceFunction();
    size_t nxz = (size_t)myNumKSumTermsA_ * myNumKSumTermsC_;
    size_t nyxz = myNumKSumTermsB_ * nxz;
    bool iAmNodeZero = (myNodeRankA_ == 0 && myNodeRankB_ == 0 && myNodeRankC_ == 0);
    Real *influenceFunction = cachedInfluenceFunction_.data();
    // Conventional PME stores only the first nx/2+1 complex x terms; the compressed variant does not.
    bool useConjugateSymmetry = algorithmType_ == AlgorithmType::PME;
    Real energy = 0;
    if (rPower_ > 3 && iAmNodeZero) {
        // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
        // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
        Real prefac = 2 * scaleFactor_ * HELPME_PI * HELPME_SQRTPI * pow(kappa_, rPower_ - 3) /
                      ((rPower_ - 3) * nonTemplateGammaComputer<Real>(rPower_) * cellVolume());
        energy += prefac * std::norm(transformedGrid[0]);
    }
    // Zero the m=0 term so it does not contribute to the conditionally convergent sum below.
    if (iAmNodeZero) transformedGrid[0] = Complex(0, 0);
    const size_t numCTerms(myNumKSumTermsC_);
#pragma omp parallel for reduction(+ : energy) num_threads(nThreads_)
    for (size_t yxz = 0; yxz < nyxz; ++yxz) {
        // Recover the global kx index from the flattened (y,x,z) loop counter.
        size_t xz = yxz % nxz;
        int kx = firstKSumTermA_ + xz / numCTerms;
        // We only loop over the first nx/2+1 x values; this
        // accounts for the "missing" complex conjugate values.
        Real permPrefac = useConjugateSymmetry && kx != 0 && kx != complexGridDimensionA_ - 1 ? 2 : 1;
        Real structFactorNorm = transformedGrid[yxz].real() * transformedGrid[yxz].real() +
                                transformedGrid[yxz].imag() * transformedGrid[yxz].imag();
        energy += permPrefac * structFactorNorm * influenceFunction[yxz];
        transformedGrid[yxz] *= influenceFunction[yxz];
    }
    // The loop accumulates twice the energy (double counting of +/- m pairs); halve it here.
    return energy / 2;
}
/*!
 * \brief convolveEV A wrapper to determine the correct convolution function to call, including virial, for
 *        the compressed PME algorithm.
 * \param transformedGrid the pointer to the Fourier space array holding the transformed grid in YXZ ordering.
 * \param convolvedGrid the (output) pointer to the Fourier space array holding the convolved grid in YXZ ordering.
 * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
 *        This vector is incremented, not assigned.
 * \return the reciprocal space energy.
 */
Real convolveEV(const Real *transformedGrid, Real *&convolvedGrid, RealMat &virial) {
    // The convolved output goes into whichever workspace does NOT already hold the input grid.
    Real *firstWorkspace = reinterpret_cast<Real *>(workSpace1_.data());
    Real *secondWorkspace = reinterpret_cast<Real *>(workSpace2_.data());
    if (transformedGrid == firstWorkspace) {
        convolvedGrid = secondWorkspace;
    } else {
        convolvedGrid = firstWorkspace;
    }
    return convolveEVCompressedFxn_(
        myNumKSumTermsA_, myNumKSumTermsB_, myNumKSumTermsC_, firstKSumTermA_, firstKSumTermB_, firstKSumTermC_,
        scaleFactor_, transformedGrid, convolvedGrid, recVecs_, cellVolume(), kappa_, &splineModA_[0],
        &splineModB_[0], &splineModC_[0], mValsA_.data(), mValsB_.data(), mValsC_.data(), virial, nThreads_);
}
/*!
 * \brief convolveEV A wrapper to determine the correct convolution function to call, including virial, for
 *        the conventional PME algorithm.
 * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering.
 * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
 *        This vector is incremented, not assigned.
 * \return the reciprocal space energy.
 */
Real convolveEV(Complex *transformedGrid, RealMat &virial) {
    // Delegate to the precomputed conventional-PME convolution kernel; the leading 'true'
    // selects the conjugate-symmetry (half-complex) treatment of the A dimension.
    const Real reciprocalEnergy = convolveEVFxn_(
        true, complexGridDimensionA_, myNumKSumTermsA_, myNumKSumTermsB_, myNumKSumTermsC_, firstKSumTermA_,
        firstKSumTermB_, firstKSumTermC_, scaleFactor_, transformedGrid, recVecs_, cellVolume(), kappa_,
        &splineModA_[0], &splineModB_[0], &splineModC_[0], mValsA_.data(), mValsB_.data(), mValsC_.data(), virial,
        nThreads_);
    return reciprocalEnergy;
}
/*!
* \brief Probes the potential grid to get the forces. Generally this shouldn't be called;
* use the various computeE() methods instead. This is the faster version that uses
* the filtered atom list and uses pre-computed splines. Therefore, the splineCache_
* member must have been updated via a call to filterAtomsAndBuildSplineCache() first.
*
* \param potentialGrid pointer to the array containing the potential, in ZYX order.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
* \param virial pointer to the virial vector if needed
*/
void probeGrid(const Real *potentialGrid, int parameterAngMom, const RealMat ¶meters, RealMat &forces,
Real *virial = nullptr) {
updateAngMomIterator(parameterAngMom + 1);
int nComponents = nCartesian(parameterAngMom);
int nForceComponents = nCartesian(parameterAngMom + 1);
const Real *paramPtr = parameters[0];
// Find how many multiples of the cache line size are needed
// to ensure that each thread hits a unique page.
size_t rowSize = std::ceil(nForceComponents / cacheLineSizeInReals_) * cacheLineSizeInReals_;
if (fractionalPhis_.nRows() != nThreads_ || fractionalPhis_.nCols() != rowSize) {
fractionalPhis_ = RealMat(nThreads_, rowSize);
}
RealMat fractionalParams;
Real cartPhi[3];
if (parameterAngMom) {
fractionalParams = cartesianTransform(parameterAngMom, false, scaledRecVecs_.transpose(), parameters);
if (virial) {
if (parameterAngMom > 1) {
// The structure factor derivatives below are only implemented up to dipoles for now
throw std::runtime_error("Only multipoles up to L=1 are supported if the virial is requested");
}
}
}
size_t nAtoms = std::accumulate(numAtomsPerThread_.begin(), numAtomsPerThread_.end(), 0);
#pragma omp parallel num_threads(nThreads_)
{
#ifdef _OPENMP
int threadID = omp_get_thread_num();
#else
int threadID = 0;
#endif
#pragma omp for
for (size_t atom = 0; atom < nAtoms; ++atom) {
const auto &cacheEntry = splineCache_[atom];
const auto &absAtom = cacheEntry.absoluteAtomNumber;
const auto &splineA = cacheEntry.aSpline;
const auto &splineB = cacheEntry.bSpline;
const auto &splineC = cacheEntry.cSpline;
if (parameterAngMom) {
Real *myScratch = fractionalPhis_[threadID % nThreads_];
probeGridImpl(absAtom, potentialGrid, nComponents, nForceComponents, splineA, splineB, splineC,
myScratch, fractionalParams[absAtom], forces[absAtom]);
// Add extra virial terms coming from the derivative of the structure factor.
// See eq. 2.16 of https://doi.org/10.1063/1.1630791 for details
if (virial) {
// Get the potential in the Cartesian basis
matrixVectorProduct(scaledRecVecs_, &myScratch[1], &cartPhi[0]);
const Real *parm = parameters[absAtom];
virial[0] += cartPhi[0] * parm[1];
virial[1] += 0.5f * (cartPhi[0] * parm[2] + cartPhi[1] * parm[1]);
virial[2] += cartPhi[1] * parm[2];
virial[3] += 0.5f * (cartPhi[0] * parm[3] + cartPhi[2] * parm[1]);
virial[4] += 0.5f * (cartPhi[1] * parm[3] + cartPhi[2] * parm[2]);
virial[5] += cartPhi[2] * parm[3];
}
} else {
probeGridImpl(potentialGrid, splineA, splineB, splineC, paramPtr[absAtom], forces[absAtom]);
}
}
}
}
/*!
 * \brief computeESlf computes the Ewald self interaction energy.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and
 *        the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \return the self energy.
 */
Real computeESlf(int parameterAngMom, const RealMat &parameters) {
    assertInitialized();
    // The self energy is (sum of squared monopole parameters) scaled by a kernel-dependent prefactor.
    const auto prefac = slfEFxn_(parameterAngMom, kappa_, scaleFactor_);
    const size_t numSites = parameters.nRows();
    Real sumOfSquares = 0;
    for (size_t site = 0; site < numSites; ++site) {
        const Real value = parameters(site, 0);
        sumOfSquares += value * value;
    }
    return prefac * sumOfSquares;
}
/*!
 * \brief computeEDir computes the direct space energy. This is provided mostly for debugging and testing
 *        purposes; generally the host program should provide the pairwise interactions.
 * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.); only 0 is currently supported here.
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 with the
 *        standard Cartesian component ordering 0 X Y Z XX XY YY XZ YZ ZZ ...
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \return the direct space energy.
 */
Real computeEDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters,
                 const RealMat &coordinates) {
    if (parameterAngMom) throw std::runtime_error("Multipole direct terms have not been coded yet.");
    sanityChecks(parameterAngMom, parameters, coordinates);
    Real energy = 0;
    Real kappaSquared = kappa_ * kappa_;
    size_t nPair = pairList.nRows();
    // FIX: the loop counter is now size_t, matching nPair; the previous int counter mixed
    // signed/unsigned comparison and would truncate for very large pair lists.
    for (size_t pair = 0; pair < nPair; ++pair) {
        short i = pairList(pair, 0);
        short j = pairList(pair, 1);
        auto deltaR = coordinates.row(j) - coordinates.row(i);
        // TODO: apply minimum image convention.
        Real rSquared = deltaR.dot(deltaR);
        energy += parameters(i, 0) * parameters(j, 0) * dirEFxn_(rSquared, kappaSquared);
    }
    return scaleFactor_ * energy;
}
/*!
 * \brief computeEFDir computes the direct space energy and force. This is provided mostly for debugging and
 *        testing purposes; generally the host program should provide the pairwise interactions.
 * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.); only 0 is currently supported here.
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 with the
 *        standard Cartesian component ordering 0 X Y Z XX XY YY XZ YZ ZZ ...
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 *        This matrix is incremented, not assigned.
 * \return the direct space energy.
 */
Real computeEFDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters,
                  const RealMat &coordinates, RealMat &forces) {
    // FIX: message previously said "self" terms — a copy/paste slip; this is the direct-space
    // routine (compare computeEDir's message).
    if (parameterAngMom) throw std::runtime_error("Multipole direct terms have not been coded yet.");
    sanityChecks(parameterAngMom, parameters, coordinates);
    Real energy = 0;
    Real kappaSquared = kappa_ * kappa_;
    size_t nPair = pairList.nRows();
    // FIX: size_t counter matches nPair's type (no signed/unsigned mix or truncation).
    for (size_t pair = 0; pair < nPair; ++pair) {
        short i = pairList(pair, 0);
        short j = pairList(pair, 1);
        auto deltaR = coordinates.row(j) - coordinates.row(i);
        // TODO: apply minimum image convention.
        Real rSquared = deltaR.dot(deltaR);
        auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared);
        Real eKernel = std::get<0>(kernels);
        Real fKernel = std::get<1>(kernels);
        Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0);
        energy += prefactor * eKernel;
        Real f = -prefactor * fKernel;
        // deltaR is a fresh temporary, so scaling its row in place is safe here
        // (unlike computeEFVDir, which must keep deltaR intact for the virial).
        auto force = deltaR.row(0);
        force *= f;
        forces.row(i) -= force;
        forces.row(j) += force;
    }
    return energy;
}
/*!
 * \brief computeEFVDir computes the direct space energy, force and virial. This is provided mostly for
 *        debugging and testing purposes; generally the host program should provide the pairwise interactions.
 * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.); only 0 is currently supported here.
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 with the
 *        standard Cartesian component ordering 0 X Y Z XX XY YY XZ YZ ZZ ...
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 *        This matrix is incremented, not assigned.
 * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
 *        This vector is incremented, not assigned.
 * \return the direct space energy.
 */
Real computeEFVDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters,
                   const RealMat &coordinates, RealMat &forces, RealMat &virial) {
    // FIX: message previously said "self" terms — a copy/paste slip; this is the direct-space
    // routine (compare computeEDir's message).
    if (parameterAngMom) throw std::runtime_error("Multipole direct terms have not been coded yet.");
    sanityChecks(parameterAngMom, parameters, coordinates);
    Real energy = 0;
    Real kappaSquared = kappa_ * kappa_;
    size_t nPair = pairList.nRows();
    // FIX: size_t counter matches nPair's type (no signed/unsigned mix or truncation).
    for (size_t pair = 0; pair < nPair; ++pair) {
        short i = pairList(pair, 0);
        short j = pairList(pair, 1);
        auto deltaR = coordinates.row(j) - coordinates.row(i);
        // TODO: apply minimum image convention.
        Real rSquared = deltaR.dot(deltaR);
        auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared);
        Real eKernel = std::get<0>(kernels);
        Real fKernel = std::get<1>(kernels);
        Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0);
        energy += prefactor * eKernel;
        Real f = -prefactor * fKernel;
        // Clone before scaling: deltaR must stay intact for the virial outer products below.
        RealMat dRCopy = deltaR.clone();
        auto force = dRCopy.row(0);
        force *= f;
        forces.row(i) -= force;
        forces.row(j) += force;
        virial[0][0] += force[0] * deltaR[0][0];
        virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]);
        virial[0][2] += force[1] * deltaR[0][1];
        virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]);
        virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]);
        virial[0][5] += force[2] * deltaR[0][2];
    }
    return energy;
}
/*!
 * \brief computeEAdj computes the adjusted real space energy which extracts the energy for excluded pairs that
 *        is present in reciprocal space. This is provided mostly for debugging and testing purposes; generally
 *        the host program should provide the pairwise interactions.
 * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.); only 0 is currently supported here.
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 with the
 *        standard Cartesian component ordering 0 X Y Z XX XY YY XZ YZ ZZ ...
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \return the adjusted energy.
 */
Real computeEAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters,
                 const RealMat &coordinates) {
    // NOTE(review): the message says "self" terms but this is the adjusted-energy path —
    // likely a copy/paste slip; confirm before changing the user-visible text.
    if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet.");
    sanityChecks(parameterAngMom, parameters, coordinates);
    Real energy = 0;
    Real kappaSquared = kappa_ * kappa_;
    size_t nPair = pairList.nRows();
    // FIX: size_t counter matches nPair's type (no signed/unsigned mix or truncation).
    for (size_t pair = 0; pair < nPair; ++pair) {
        short i = pairList(pair, 0);
        short j = pairList(pair, 1);
        auto deltaR = coordinates.row(j) - coordinates.row(i);
        // TODO: apply minimum image convention.
        Real rSquared = deltaR.dot(deltaR);
        energy += parameters(i, 0) * parameters(j, 0) * adjEFxn_(rSquared, kappaSquared);
    }
    return scaleFactor_ * energy;
}
/*!
 * \brief computeEFAdj computes the adjusted energy and force. This is provided mostly for debugging and
 *        testing purposes; generally the host program should provide the pairwise interactions.
 * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.); only 0 is currently supported here.
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 with the
 *        standard Cartesian component ordering 0 X Y Z XX XY YY XZ YZ ZZ ...
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 *        This matrix is incremented, not assigned.
 * \return the adjusted energy.
 */
Real computeEFAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters,
                  const RealMat &coordinates, RealMat &forces) {
    // NOTE(review): the message says "self" terms but this is the adjusted-energy path —
    // likely a copy/paste slip; confirm before changing the user-visible text.
    if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet.");
    sanityChecks(parameterAngMom, parameters, coordinates);
    Real energy = 0;
    Real kappaSquared = kappa_ * kappa_;
    size_t nPair = pairList.nRows();
    // FIX: size_t counter matches nPair's type (no signed/unsigned mix or truncation).
    for (size_t pair = 0; pair < nPair; ++pair) {
        short i = pairList(pair, 0);
        short j = pairList(pair, 1);
        auto deltaR = coordinates.row(j) - coordinates.row(i);
        // TODO: apply minimum image convention.
        Real rSquared = deltaR.dot(deltaR);
        auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared);
        Real eKernel = std::get<0>(kernels);
        Real fKernel = std::get<1>(kernels);
        Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0);
        energy += prefactor * eKernel;
        Real f = -prefactor * fKernel;
        // deltaR is a fresh temporary, so scaling its row in place is safe here
        // (unlike computeEFVAdj, which must keep deltaR intact for the virial).
        auto force = deltaR.row(0);
        force *= f;
        forces.row(i) -= force;
        forces.row(j) += force;
    }
    return energy;
}
/*!
 * \brief computeEFVAdj computes the adjusted energy, forces and virial. This is provided mostly for debugging
 *        and testing purposes; generally the host program should provide the pairwise interactions.
 * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.); only 0 is currently supported here.
 * \param parameters the list of parameters associated with each atom; for a parameter with angular
 *        momentum L a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 with the
 *        standard Cartesian component ordering 0 X Y Z XX XY YY XZ YZ ZZ ...
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 *        This matrix is incremented, not assigned.
 * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
 *        This vector is incremented, not assigned.
 * \return the adjusted energy.
 */
Real computeEFVAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters,
                   const RealMat &coordinates, RealMat &forces, RealMat &virial) {
    // NOTE(review): the message says "self" terms but this is the adjusted-energy path —
    // likely a copy/paste slip; confirm before changing the user-visible text.
    if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet.");
    sanityChecks(parameterAngMom, parameters, coordinates);
    Real energy = 0;
    Real kappaSquared = kappa_ * kappa_;
    size_t nPair = pairList.nRows();
    // FIX: size_t counter matches nPair's type (no signed/unsigned mix or truncation).
    for (size_t pair = 0; pair < nPair; ++pair) {
        short i = pairList(pair, 0);
        short j = pairList(pair, 1);
        auto deltaR = coordinates.row(j) - coordinates.row(i);
        // TODO: apply minimum image convention.
        Real rSquared = deltaR.dot(deltaR);
        auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared);
        Real eKernel = std::get<0>(kernels);
        Real fKernel = std::get<1>(kernels);
        Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0);
        energy += prefactor * eKernel;
        Real f = -prefactor * fKernel;
        // Clone before scaling: deltaR must stay intact for the virial outer products below.
        RealMat dRCopy = deltaR.clone();
        auto force = dRCopy.row(0);
        force *= f;
        forces.row(i) -= force;
        forces.row(j) += force;
        virial[0][0] += force[0] * deltaR[0][0];
        virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]);
        virial[0][2] += force[1] * deltaR[0][1];
        virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]);
        virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]);
        virial[0][5] += force[2] * deltaR[0][2];
    }
    return energy;
}
/*!
 * \brief Computes the full electrostatic potential at atomic sites due to point charges located at those same
 *        sites. The site located at each probe location is neglected, to avoid the resulting singularity.
 * \param charges the list of point charges (in e) associated with each particle; an nAtoms x 1 matrix.
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param potential the array holding the potential; an nAtoms x 1 matrix. Incremented, not assigned.
 * \param sphericalCutoff the cutoff (in A) applied to the real space summations,
 *        which must be no more than half of the box dimensions.
 * \throws std::logic_error if the configured reciprocal space algorithm is not recognized.
 */
void computePAtAtomicSites(const RealMat &charges, const RealMat &coordinates, RealMat &potential,
                           Real sphericalCutoff) {
    sanityChecks(0, charges, coordinates);
    // The minimum image convention requires that the cutoff be less than half the minimum box width
    checkMinimumImageCutoff(sphericalCutoff);
    size_t nAtoms = coordinates.nRows();
    // Direct space, using simple O(N^2) algorithm. This can be improved using a nonbonded list if needed.
    Real cutoffSquared = sphericalCutoff * sphericalCutoff;
    Real kappaSquared = kappa_ * kappa_;
#pragma omp parallel for num_threads(nThreads_)
    for (size_t i = 0; i < nAtoms; ++i) {
        const auto &coordsI = coordinates.row(i);
        Real *phiPtr = potential[i];
        for (size_t j = 0; j < nAtoms; ++j) {
            // No self interactions are included, to remove the singularity
            if (i == j) continue;
            Real qJ = charges[j][0];
            const auto &coordsJ = coordinates.row(j);
            auto RIJ = minimumImageDeltaR(coordsI, coordsJ);
            Real rSquared = RIJ[0] * RIJ[0] + RIJ[1] * RIJ[1] + RIJ[2] * RIJ[2];
            if (rSquared < cutoffSquared) {
                *phiPtr += scaleFactor_ * qJ * dirEFxn_(rSquared, kappaSquared);
            }
        }
    }
    // Reciprocal space term
    filterAtomsAndBuildSplineCache(0, coordinates);
    auto realGrid = spreadParameters(0, charges);
    Real *potentialGrid;
    if (algorithmType_ == AlgorithmType::PME) {
        auto gridAddress = forwardTransform(realGrid);
        convolveE(gridAddress);
        potentialGrid = inverseTransform(gridAddress);
    } else if (algorithmType_ == AlgorithmType::CompressedPME) {
        auto gridAddress = compressedForwardTransform(realGrid);
        convolveE(gridAddress);
        potentialGrid = compressedInverseTransform(gridAddress);
    } else {
        // BUG FIX: the exception was previously constructed but never thrown, so execution fell
        // through and dereferenced the uninitialized potentialGrid below.
        throw std::logic_error("Unknown algorithm in helpme::computePAtAtomicSites");
    }
#pragma omp parallel for num_threads(nThreads_)
    for (size_t atom = 0; atom < nAtoms; ++atom) {
        const auto &cacheEntry = splineCache_[atom];
        const auto &absAtom = cacheEntry.absoluteAtomNumber;
        probeGridImpl(potentialGrid, 1, cacheEntry.aSpline, cacheEntry.bSpline, cacheEntry.cSpline,
                      potential[absAtom]);
    }
    // Self term - back out the contribution from the atoms at each probe site
    Real prefac = slfEFxn_(0, kappa_, scaleFactor_);
    for (size_t atom = 0; atom < nAtoms; ++atom) {
        potential[atom][0] += 2 * prefac * charges[atom][0];
    }
}
/*!
* \brief Runs a PME reciprocal space calculation, computing the potential and, optionally, its derivatives.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). A negative value indicates that only the shell with |parameterAngMom| is to be considered,
* e.g. a value of -2 specifies that only quadrupoles (and not dipoles or charges) will be provided; the input
* matrix should have dimensions corresponding only to the number of terms in this shell.
* \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \param gridPoints the list of grid points at which the potential is needed; can be the same as the
* coordinates.
* \param derivativeLevel the order of the potential derivatives required; 0 is the potential, 1 is
* (minus) the field, etc. A negative value indicates that only the derivative with order |parameterAngMom|
* is to be generated, e.g. -2 specifies that only the second derivative (not the potential or its gradient)
* will be returned as output. The output matrix should have space for only these terms, accordingly.
* \param potential the array holding the potential. This is a matrix of dimensions
* nAtoms x nD, where nD is the derivative level requested. See the details fo the parameters argument for
* information about ordering of derivative components. N.B. this array is incremented with the potential, not
* assigned, so take care to zero it first if only the current results are desired.
*/
void computePRec(int parameterAngMom, const RealMat ¶meters, const RealMat &coordinates,
const RealMat &gridPoints, int derivativeLevel, RealMat &potential) {
bool onlyOneShellForInput = parameterAngMom < 0;
bool onlyOneShellForOutput = derivativeLevel < 0;
parameterAngMom = std::abs(parameterAngMom);
derivativeLevel = std::abs(derivativeLevel);
int cartesianOffset = onlyOneShellForInput ? nCartesian(parameterAngMom - 1) : 0;
sanityChecks(parameterAngMom, parameters, coordinates, cartesianOffset);
updateAngMomIterator(std::max(parameterAngMom, derivativeLevel));
// Note: we're calling the version of spread parameters that computes its own splines here.
// This is quite inefficient, but allow the potential to be computed at arbitrary locations by
// simply regenerating splines on demand in the probing stage. If this becomes too slow, it's
// easy to write some logic to check whether gridPoints and coordinates are the same, and
// handle that special case using spline cacheing machinery for efficiency.
Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data());
std::fill(workSpace1_.begin(), workSpace1_.end(), 0);
updateAngMomIterator(parameterAngMom);
auto fractionalParameters =
cartesianTransform(parameterAngMom, onlyOneShellForInput, scaledRecVecs_.transpose(), parameters);
int nComponents = nCartesian(parameterAngMom) - cartesianOffset;
size_t nAtoms = coordinates.nRows();
for (size_t atom = 0; atom < nAtoms; ++atom) {
// Blindly reconstruct splines for this atom, assuming nothing about the validity of the cache.
// Note that this incurs a somewhat steep cost due to repeated memory allocations.
auto bSplines = makeBSplines(coordinates[atom], parameterAngMom);
const auto &splineA = std::get<0>(bSplines);
const auto &splineB = std::get<1>(bSplines);
const auto &splineC = std::get<2>(bSplines);
const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
int numPointsA = static_cast<int>(aGridIterator.size());
int numPointsB = static_cast<int>(bGridIterator.size());
int numPointsC = static_cast<int>(cGridIterator.size());
const auto *iteratorDataA = aGridIterator.data();
const auto *iteratorDataB = bGridIterator.data();
const auto *iteratorDataC = cGridIterator.data();
for (int component = 0; component < nComponents; ++component) {
const auto &quanta = angMomIterator_[component + cartesianOffset];
Real param = fractionalParameters(atom, component);
const Real *splineValsA = splineA[quanta[0]];
const Real *splineValsB = splineB[quanta[1]];
const Real *splineValsC = splineC[quanta[2]];
for (int pointC = 0; pointC < numPointsC; ++pointC) {
const auto &cPoint = iteratorDataC[pointC];
Real cValP = param * splineValsC[cPoint.second];
for (int pointB = 0; pointB < numPointsB; ++pointB) {
const auto &bPoint = iteratorDataB[pointB];
Real cbValP = cValP * splineValsB[bPoint.second];
Real *cbRow = &realGrid[cPoint.first * myGridDimensionB_ * myGridDimensionA_ +
bPoint.first * myGridDimensionA_];
for (int pointA = 0; pointA < numPointsA; ++pointA) {
const auto &aPoint = iteratorDataA[pointA];
cbRow[aPoint.first] += cbValP * splineValsA[aPoint.second];
}
}
}
}
}
Real *potentialGrid;
if (algorithmType_ == AlgorithmType::PME) {
auto gridAddress = forwardTransform(realGrid);
convolveE(gridAddress);
potentialGrid = inverseTransform(gridAddress);
} else if (algorithmType_ == AlgorithmType::CompressedPME) {
auto gridAddress = compressedForwardTransform(realGrid);
convolveE(gridAddress);
potentialGrid = compressedInverseTransform(gridAddress);
} else {
std::logic_error("Unknown algorithm in helpme::computePRec");
}
auto fracPotential = potential.clone();
fracPotential.setZero();
cartesianOffset = onlyOneShellForOutput ? nCartesian(derivativeLevel - 1) : 0;
int nPotentialComponents = nCartesian(derivativeLevel) - cartesianOffset;
size_t nPoints = gridPoints.nRows();
for (size_t point = 0; point < nPoints; ++point) {
Real *phiPtr = fracPotential[point];
auto bSplines = makeBSplines(gridPoints[point], derivativeLevel);
auto splineA = std::get<0>(bSplines);
auto splineB = std::get<1>(bSplines);
auto splineC = std::get<2>(bSplines);
const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
const Real *splineStartA = splineA[0];
const Real *splineStartB = splineB[0];
const Real *splineStartC = splineC[0];
for (const auto &cPoint : cGridIterator) {
for (const auto &bPoint : bGridIterator) {
const Real *cbRow = potentialGrid + cPoint.first * myGridDimensionA_ * myGridDimensionB_ +
bPoint.first * myGridDimensionA_;
for (const auto &aPoint : aGridIterator) {
Real gridVal = cbRow[aPoint.first];
for (int component = 0; component < nPotentialComponents; ++component) {
const auto &quanta = angMomIterator_[component + cartesianOffset];
const Real *splineValsA = splineStartA + quanta[0] * splineOrder_;
const Real *splineValsB = splineStartB + quanta[1] * splineOrder_;
const Real *splineValsC = splineStartC + quanta[2] * splineOrder_;
phiPtr[component] += gridVal * splineValsA[aPoint.second] * splineValsB[bPoint.second] *
splineValsC[cPoint.second];
}
}
}
}
}
potential += cartesianTransform(derivativeLevel, onlyOneShellForOutput, scaledRecVecs_, fracPotential);
}
/*!
* \brief Runs a PME reciprocal space calculation, computing energies.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \return the reciprocal space energy.
*/
Real computeERec(int parameterAngMom, const RealMat ¶meters, const RealMat &coordinates) {
sanityChecks(parameterAngMom, parameters, coordinates);
filterAtomsAndBuildSplineCache(parameterAngMom, coordinates);
auto realGrid = spreadParameters(parameterAngMom, parameters);
Real energy;
if (algorithmType_ == AlgorithmType::PME) {
auto gridAddress = forwardTransform(realGrid);
energy = convolveE(gridAddress);
} else if (algorithmType_ == AlgorithmType::CompressedPME) {
auto gridAddress = compressedForwardTransform(realGrid);
energy = convolveE(gridAddress);
} else {
std::logic_error("Unknown algorithm in helpme::computeERec");
}
return energy;
}
/*!
* \brief Runs a PME reciprocal space calculation, computing energies and forces.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
* This matrix is incremented, not assigned.
* \return the reciprocal space energy.
*/
Real computeEFRec(int parameterAngMom, const RealMat ¶meters, const RealMat &coordinates, RealMat &forces) {
sanityChecks(parameterAngMom, parameters, coordinates);
// Spline derivative level bumped by 1, for energy gradients.
filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates);
auto realGrid = spreadParameters(parameterAngMom, parameters);
Real energy;
if (algorithmType_ == AlgorithmType::PME) {
auto gridAddress = forwardTransform(realGrid);
energy = convolveE(gridAddress);
auto potentialGrid = inverseTransform(gridAddress);
probeGrid(potentialGrid, parameterAngMom, parameters, forces);
} else if (algorithmType_ == AlgorithmType::CompressedPME) {
auto gridAddress = compressedForwardTransform(realGrid);
energy = convolveE(gridAddress);
auto potentialGrid = compressedInverseTransform(gridAddress);
probeGrid(potentialGrid, parameterAngMom, parameters, forces);
} else {
std::logic_error("Unknown algorithm in helpme::computeEFRec");
}
return energy;
}
/*!
* \brief Runs a PME reciprocal space calculation, computing energies, forces and the virial.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
* This matrix is incremented, not assigned.
* \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
* This vector is incremented, not assigned.
* \return the reciprocal space energy.
*/
Real computeEFVRec(int parameterAngMom, const RealMat ¶meters, const RealMat &coordinates, RealMat &forces,
RealMat &virial) {
sanityChecks(parameterAngMom, parameters, coordinates);
// Spline derivative level bumped by 1, for energy gradients.
filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates);
auto realGrid = spreadParameters(parameterAngMom, parameters);
Real energy;
if (algorithmType_ == AlgorithmType::PME) {
auto gridAddress = forwardTransform(realGrid);
energy = convolveEV(gridAddress, virial);
auto potentialGrid = inverseTransform(gridAddress);
probeGrid(potentialGrid, parameterAngMom, parameters, forces, virial[0]);
} else if (algorithmType_ == AlgorithmType::CompressedPME) {
auto gridAddress = compressedForwardTransform(realGrid);
Real *convolvedGrid;
energy = convolveEV(gridAddress, convolvedGrid, virial);
auto potentialGrid = compressedInverseTransform(convolvedGrid);
probeGrid(potentialGrid, parameterAngMom, parameters, forces);
} else {
std::logic_error("Unknown algorithm in helpme::computeEFVRec");
}
return energy;
}
/*!
* \brief Runs a full (direct and reciprocal space) PME calculation, computing the energy. The direct space
* implementation here is not totally optimal, so this routine should primarily be used for testing and
* debugging.
* \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN,jN.
* \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
* This matrix is incremented, not assigned.
* \return the full PME energy.
*/
Real computeEAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom,
const RealMat ¶meters, const RealMat &coordinates) {
sanityChecks(parameterAngMom, parameters, coordinates);
Real energy = computeERec(parameterAngMom, parameters, coordinates);
energy += computeESlf(parameterAngMom, parameters);
energy += computeEDir(includedList, parameterAngMom, parameters, coordinates);
energy += computeEAdj(excludedList, parameterAngMom, parameters, coordinates);
return energy;
}
/*!
* \brief Runs a full (direct and reciprocal space) PME calculation, computing energies and forces. The direct
* space implementation here is not totally optimal, so this routine should primarily be used for testing
* and debugging.
* \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
* \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
* This matrix is incremented, not assigned.
* \return the full PME energy.
*/
Real computeEFAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom,
const RealMat ¶meters, const RealMat &coordinates, RealMat &forces) {
sanityChecks(parameterAngMom, parameters, coordinates);
Real energy = computeEFRec(parameterAngMom, parameters, coordinates, forces);
energy += computeESlf(parameterAngMom, parameters);
energy += computeEFDir(includedList, parameterAngMom, parameters, coordinates, forces);
energy += computeEFAdj(excludedList, parameterAngMom, parameters, coordinates, forces);
return energy;
}
/*!
* \brief Runs a full (direct and reciprocal space) PME calculation, computing energies, forces and virials.
* The direct space implementation here is not totally optimal, so this routine should primarily
* be used for testing and debugging.
* \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
* \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
* quadrupoles, etc.).
* \param parameters the list of parameters associated with each atom (charges, C6
* coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL
* is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
*
* 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
*
* i.e. generated by the python loops
* \code{.py}
* for L in range(maxAM+1):
* for Lz in range(0,L+1):
* for Ly in range(0, L - Lz + 1):
* Lx = L - Ly - Lz
* \endcode
* \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
* \param energy pointer to the variable holding the energy; this is incremented, not assigned.
* \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
* This matrix is incremented, not assigned.
* \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
* This vector is incremented, not assigned.
* \return the full PME energy.
*/
Real computeEFVAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom,
const RealMat ¶meters, const RealMat &coordinates, RealMat &forces, RealMat &virial) {
sanityChecks(parameterAngMom, parameters, coordinates);
Real energy = computeEFVRec(parameterAngMom, parameters, coordinates, forces, virial);
energy += computeESlf(parameterAngMom, parameters);
energy += computeEFVDir(includedList, parameterAngMom, parameters, coordinates, forces, virial);
energy += computeEFVAdj(excludedList, parameterAngMom, parameters, coordinates, forces, virial);
return energy;
}
/*!
* \brief setup initializes this object for a PME calculation using only threading.
* This may be called repeatedly without compromising performance.
* \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
* dispersion).
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
* \param dimA the dimension of the FFT grid along the A axis.
* \param dimB the dimension of the FFT grid along the B axis.
* \param dimC the dimension of the FFT grid along the C axis.
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     * 1 / [4 pi epsilon0] for Coulomb calculations).
* \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
* are used.
*/
void setup(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor, int nThreads) {
setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, dimA, dimB, dimC, scaleFactor, nThreads,
0, NodeOrder::ZYX, 1, 1, 1);
}
/*!
* \brief setupCompressed initializes this object for a compressed PME calculation using only threading.
* This may be called repeatedly without compromising performance.
* \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
* dispersion).
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
* \param dimA the dimension of the FFT grid along the A axis.
* \param dimB the dimension of the FFT grid along the B axis.
* \param dimC the dimension of the FFT grid along the C axis.
* \param maxKA the maximum K value in the reciprocal sum along the A axis.
* \param maxKB the maximum K value in the reciprocal sum along the B axis.
* \param maxKC the maximum K value in the reciprocal sum along the C axis.
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     * 1 / [4 pi epsilon0] for Coulomb calculations).
* \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
* are used.
*/
void setupCompressed(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, int maxKA, int maxKB,
int maxKC, Real scaleFactor, int nThreads) {
setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, maxKA, maxKB, maxKC, scaleFactor,
nThreads, 0, NodeOrder::ZYX, 1, 1, 1);
}
#if HAVE_MPI == 1
/*!
     * \brief setupParallel initializes this object for a conventional PME calculation using MPI parallelism
* and threading. This may be called repeatedly without compromising performance.
* \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
* dispersion).
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
* \param dimA the dimension of the FFT grid along the A axis.
* \param dimB the dimension of the FFT grid along the B axis.
* \param dimC the dimension of the FFT grid along the C axis.
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
* 1 / [4 pi epslion0] for Coulomb calculations).
* \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
* are used.
     * \param communicator the MPI communicator for the reciprocal space calculation, which should already be
* initialized.
* \param numNodesA the number of nodes to be used for the A dimension.
* \param numNodesB the number of nodes to be used for the B dimension.
* \param numNodesC the number of nodes to be used for the C dimension.
*/
void setupParallel(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor,
int nThreads, const MPI_Comm &communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB,
int numNodesC) {
setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, dimA, dimB, dimC, scaleFactor, nThreads,
(void *)&communicator, nodeOrder, numNodesA, numNodesB, numNodesC);
}
/*!
     * \brief setupCompressedParallel initializes this object for a compressed PME calculation using MPI parallelism
* and threading. This may be called repeatedly without compromising performance.
* \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
* dispersion).
* \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
* \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
* \param dimA the dimension of the FFT grid along the A axis.
* \param dimB the dimension of the FFT grid along the B axis.
* \param dimC the dimension of the FFT grid along the C axis.
* \param maxKA the maximum K value in the reciprocal sum along the A axis.
* \param maxKB the maximum K value in the reciprocal sum along the B axis.
* \param maxKC the maximum K value in the reciprocal sum along the C axis.
* \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     * 1 / [4 pi epsilon0] for Coulomb calculations).
* \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
* are used.
     * \param communicator the MPI communicator for the reciprocal space calculation, which should already be
* initialized.
* \param numNodesA the number of nodes to be used for the A dimension.
* \param numNodesB the number of nodes to be used for the B dimension.
* \param numNodesC the number of nodes to be used for the C dimension.
*/
void setupCompressedParallel(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, int maxKA,
int maxKB, int maxKC, Real scaleFactor, int nThreads, const MPI_Comm &communicator,
NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC) {
setupCalculationMetadata(rPower, kappa, splineOrder, dimA, dimB, dimC, maxKA, maxKB, maxKC, scaleFactor,
nThreads, (void *)&communicator, nodeOrder, numNodesA, numNodesB, numNodesC);
}
#endif
};
} // Namespace helpme
using PMEInstanceD = helpme::PMEInstance<double>;
using PMEInstanceF = helpme::PMEInstance<float>;
#else
// C header
#include <stddef.h>
#if HAVE_MPI == 1
#include <mpi.h>
#endif
/* Enumerations mirroring the C++ API's LatticeType and NodeOrder. */
typedef enum { Undefined = 0, XAligned = 1, ShapeMatrix = 2 } LatticeType;
typedef enum { /* Undefined comes from the above scope */ ZYX = 1 } NodeOrder;
/* Opaque handle type; functions suffixed D below operate on double-precision instances,
 * those suffixed F on single-precision ones. */
typedef struct PMEInstance PMEInstance;
/* Construction / destruction of PME instances. */
extern struct PMEInstance *helpme_createD();
extern struct PMEInstance *helpme_createF();
extern void helpme_destroyD(struct PMEInstance *pme);
extern void helpme_destroyF(struct PMEInstance *pme);
/* Serial setup: conventional PME (full grid) and compressed PME (explicit K limits). */
extern void helpme_setupD(struct PMEInstance *pme, int rPower, double kappa, int splineOrder, int aDim, int bDim,
                          int cDim, double scaleFactor, int nThreads);
extern void helpme_setupF(struct PMEInstance *pme, int rPower, float kappa, int splineOrder, int aDim, int bDim,
                          int cDim, float scaleFactor, int nThreads);
extern void helpme_setup_compressedD(struct PMEInstance *pme, int rPower, double kappa, int splineOrder, int aDim,
                                     int bDim, int cDim, int maxKA, int maxKB, int maxKC, double scaleFactor,
                                     int nThreads);
extern void helpme_setup_compressedF(struct PMEInstance *pme, int rPower, float kappa, int splineOrder, int aDim,
                                     int bDim, int cDim, int maxKA, int maxKB, int maxKC, float scaleFactor,
                                     int nThreads);
#if HAVE_MPI == 1
/* MPI-parallel setup variants. */
extern void helpme_setup_parallelD(PMEInstance *pme, int rPower, double kappa, int splineOrder, int dimA, int dimB,
                                   int dimC, double scaleFactor, int nThreads, MPI_Comm communicator,
                                   NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC);
extern void helpme_setup_parallelF(PMEInstance *pme, int rPower, float kappa, int splineOrder, int dimA, int dimB,
                                   int dimC, float scaleFactor, int nThreads, MPI_Comm communicator,
                                   NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC);
extern void helpme_setup_compressed_parallelD(PMEInstance *pme, int rPower, double kappa, int splineOrder, int dimA,
                                              int dimB, int dimC, int maxKA, int maxKB, int maxKC, double scaleFactor,
                                              int nThreads, MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA,
                                              int numNodesB, int numNodesC);
extern void helpme_setup_compressed_parallelF(PMEInstance *pme, int rPower, float kappa, int splineOrder, int dimA,
                                              int dimB, int dimC, int maxKA, int maxKB, int maxKC, float scaleFactor,
                                              int nThreads, MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA,
                                              int numNodesB, int numNodesC);
#endif // HAVE_MPI
/* Unit-cell specification.
 * NOTE(review): the fourth scalar argument is named kappa but, given the beta/gamma that
 * follow, is presumably the alpha lattice angle — confirm against the implementation. */
extern void helpme_set_lattice_vectorsD(struct PMEInstance *pme, double A, double B, double C, double kappa,
                                        double beta, double gamma, LatticeType latticeType);
extern void helpme_set_lattice_vectorsF(struct PMEInstance *pme, float A, float B, float C, float kappa, float beta,
                                        float gamma, LatticeType latticeType);
/* Reciprocal-space evaluation: energy (E), energy + forces (EF), energy + forces + virial (EFV). */
extern double helpme_compute_E_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters,
                                    double *coordinates);
extern float helpme_compute_E_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                   float *coordinates);
extern double helpme_compute_EF_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters,
                                     double *coordinates, double *forces);
extern float helpme_compute_EF_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                    float *coordinates, float *forces);
extern double helpme_compute_EFV_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters,
                                      double *coordinates, double *forces, double *virial);
extern float helpme_compute_EFV_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                     float *coordinates, float *forces, float *virial);
/* Reciprocal-space potential (and derivatives) probed at arbitrary grid points. */
extern void helpme_compute_P_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters,
                                  double *coordinates, size_t nGridPoints, double *gridPoints, int derivativeLevel,
                                  double *potential);
extern void helpme_compute_P_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters,
                                  float *coordinates, size_t nGridPoints, float *gridPoints, int derivativeLevel,
                                  float *potential);
#endif // C++/C
#endif // Header guard
|
block_jacobi_precond.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "H2Pack.h"
#include "block_jacobi_precond.h"
// Construct a block_jacobi_precond from a H2Pack structure.
// Precomputes the inverse of each leaf-node diagonal block of (K + shift * I) and stores
// them in packed form, together with the forward/backward permutation arrays needed to
// apply the preconditioner in the caller's (unpermuted) ordering.
void H2P_build_block_jacobi_precond(H2Pack_p h2pack, const DTYPE shift, block_jacobi_precond_p *precond_)
{
    block_jacobi_precond_p precond = (block_jacobi_precond_p) malloc(sizeof(block_jacobi_precond_s));
    assert(precond != NULL);
    int n_point = h2pack->n_point;
    int n_block = h2pack->n_leaf_node;
    int n_thread = h2pack->n_thread;
    int krnl_dim = h2pack->krnl_dim;
    int *pt_cluster = h2pack->pt_cluster;
    int *leaf_nodes = h2pack->height_nodes;
    int *D_nrow = h2pack->D_nrow;
    DTYPE *coord = h2pack->coord;
    double st = get_wtime_sec();
    // Compute each block's size, row displacement, and offset in the packed inverse storage
    int *blk_sizes = (int*) malloc(sizeof(int) * n_block);
    int *blk_displs = (int*) malloc(sizeof(int) * (n_block + 1));
    size_t *blk_inv_ptr = (size_t*) malloc(sizeof(size_t) * n_block);
    assert(blk_sizes != NULL && blk_displs != NULL && blk_inv_ptr != NULL);
    size_t blk_total_size = 0;
    blk_displs[0] = 0;
    for (int i = 0; i < n_block; i++)
    {
        // Widen before squaring: D_nrow[i]^2 can overflow int for large leaf blocks
        size_t blk_size_i = (size_t) D_nrow[i];
        blk_sizes[i] = D_nrow[i];
        blk_inv_ptr[i] = blk_total_size;
        blk_displs[i + 1] = blk_displs[i] + D_nrow[i];
        blk_total_size += blk_size_i * blk_size_i;
    }
    DTYPE *blk_inv = (DTYPE*) malloc(sizeof(DTYPE) * blk_total_size);
    ASSERT_PRINTF(blk_inv != NULL, "Failed to allocate array of size %zu for block Jacobi preconditioner\n", blk_total_size);
    size_t total_msize = sizeof(int) * (2 * n_block + 1) + sizeof(size_t) * n_block + sizeof(DTYPE) * blk_total_size;
    // Per-point pivot storage; each block uses the disjoint slice starting at its first point
    int *all_ipiv = (int*) malloc(sizeof(int) * n_point * krnl_dim);
    ASSERT_PRINTF(all_ipiv != NULL, "Failed to allocate pivot array for block Jacobi preconditioner\n");
    // Evaluate, shift, and invert each leaf-node diagonal kernel block in parallel
    #pragma omp parallel for schedule(dynamic) num_threads(n_thread)
    for (int i = 0; i < n_block; i++)
    {
        int node = leaf_nodes[i];
        int pt_s = pt_cluster[2 * node];
        int pt_e = pt_cluster[2 * node + 1];
        int npt = pt_e - pt_s + 1;
        int blk_size = blk_sizes[i];
        int *ipiv = all_ipiv + pt_s * krnl_dim;
        DTYPE *blk_node = blk_inv + blk_inv_ptr[i];
        if (blk_size == 0) continue;
        // Dense kernel block K(cluster_i, cluster_i), written directly into the packed storage
        h2pack->krnl_eval(
            coord + pt_s, n_point, npt,
            coord + pt_s, n_point, npt,
            h2pack->krnl_param, blk_node, npt * krnl_dim
        );
        // Diagonal shift, then in-place inversion via LU factorization
        for (int j = 0; j < blk_size; j++)
            blk_node[j * blk_size + j] += shift;
        int info;
        info = LAPACK_GETRF(LAPACK_ROW_MAJOR, blk_size, blk_size, blk_node, blk_size, ipiv);
        ASSERT_PRINTF(info == 0, "Node %d: blk_size = %d, LAPACK_GETRF return %d\n", node, blk_size, info);
        info = LAPACK_GETRI(LAPACK_ROW_MAJOR, blk_size, blk_node, blk_size, ipiv);
        ASSERT_PRINTF(info == 0, "Node %d: blk_size = %d, LAPACK_GETRI return %d\n", node, blk_size, info);
    }  // End of i loop
    free(all_ipiv);
    double et = get_wtime_sec();
    // Working buffers for permuting input/output vectors between external and internal orderings
    size_t pmt_idx_bytes = sizeof(int) * h2pack->krnl_mat_size;
    size_t pmt_vec_bytes = sizeof(DTYPE) * h2pack->krnl_mat_size;
    int *fwd_pmt = (int*) malloc(pmt_idx_bytes);
    int *bwd_pmt = (int*) malloc(pmt_idx_bytes);
    DTYPE *pmt_b = (DTYPE*) malloc(pmt_vec_bytes);
    DTYPE *pmt_x = (DTYPE*) malloc(pmt_vec_bytes);
    ASSERT_PRINTF(
        fwd_pmt != NULL && bwd_pmt != NULL && pmt_b != NULL && pmt_x != NULL,
        "Failed to allocate vector permutation arrays for block Jacobi preconditioner\n"
    );
    memcpy(fwd_pmt, h2pack->fwd_pmt_idx, pmt_idx_bytes);
    memcpy(bwd_pmt, h2pack->bwd_pmt_idx, pmt_idx_bytes);
    total_msize += 2 * (pmt_idx_bytes + pmt_vec_bytes);
    precond->mat_size = h2pack->krnl_mat_size;
    precond->n_block = n_block;
    precond->blk_sizes = blk_sizes;
    precond->blk_displs = blk_displs;
    precond->blk_inv = blk_inv;
    precond->pmt_b = pmt_b;
    precond->pmt_x = pmt_x;
    precond->fwd_pmt = fwd_pmt;
    precond->bwd_pmt = bwd_pmt;
    precond->blk_inv_ptr = blk_inv_ptr;
    precond->t_build = et - st;
    precond->t_apply = 0.0;
    precond->n_apply = 0;
    precond->mem_MB = (double) total_msize / 1048576.0;
    *precond_ = precond;
}
// Apply block Jacobi preconditioner, x := M_{BJP}^{-1} * b
void block_jacobi_precond_apply(block_jacobi_precond_p precond, const DTYPE *b, DTYPE *x)
{
    if (precond == NULL) return;
    int mat_size = precond->mat_size;
    int n_block = precond->n_block;
    int *blk_sizes = precond->blk_sizes;
    int *blk_displs = precond->blk_displs;
    int *fwd_pmt = precond->fwd_pmt;
    int *bwd_pmt = precond->bwd_pmt;
    size_t *blk_inv_ptr = precond->blk_inv_ptr;
    DTYPE *blk_inv = precond->blk_inv;
    DTYPE *pmt_b = precond->pmt_b;
    DTYPE *pmt_x = precond->pmt_x;
    double start_t = get_wtime_sec();
    // Permute b into the internal (leaf-cluster) ordering
    gather_vector_elements(sizeof(DTYPE), mat_size, fwd_pmt, b, pmt_b);
    // Each stored block inverse acts on its own contiguous slice: an independent dense mat-vec
    #pragma omp parallel for schedule(dynamic)
    for (int blk = 0; blk < n_block; blk++)
    {
        int nrow = blk_sizes[blk];
        int spos = blk_displs[blk];
        const DTYPE *inv_blk = blk_inv + blk_inv_ptr[blk];
        CBLAS_GEMV(
            CblasRowMajor, CblasNoTrans, nrow, nrow,
            1.0, inv_blk, nrow, pmt_b + spos, 1, 0.0, pmt_x + spos, 1
        );
    }
    // Permute the result back to the caller's ordering
    gather_vector_elements(sizeof(DTYPE), mat_size, bwd_pmt, pmt_x, x);
    precond->t_apply += get_wtime_sec() - start_t;
    precond->n_apply++;
}
// Destroy a block_jacobi_precond structure and set the caller's pointer to NULL
void block_jacobi_precond_destroy(block_jacobi_precond_p *precond_)
{
    block_jacobi_precond_p p = *precond_;
    if (p == NULL) return;
    // Release every buffer owned by the structure, then the structure itself
    free(p->blk_sizes);
    free(p->blk_displs);
    free(p->blk_inv);
    free(p->pmt_b);
    free(p->pmt_x);
    free(p->fwd_pmt);
    free(p->bwd_pmt);
    free(p->blk_inv_ptr);
    free(p);
    *precond_ = NULL;
}
// Print statistic info of a block_jacobi_precond structure
// (comment previously said "FSAI_precond" — copy-paste from the FSAI variant)
void block_jacobi_precond_print_stat(block_jacobi_precond_p precond)
{
    if (precond == NULL) return;
    // Guard against division by zero: if the preconditioner was never
    // applied, report an average apply time of 0 instead of inf/nan
    double avg_apply_t = 0.0;
    if (precond->n_apply > 0)
        avg_apply_t = precond->t_apply / (double) precond->n_apply;
    printf(
        "Block Jacobi precond used memory = %.2lf MB, build time = %.3lf sec, apply avg time = %.3lf sec\n",
        precond->mem_MB, precond->t_build, avg_apply_t
    );
}
|
BatchNormalization.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include <cstdlib>
#ifdef BB_WITH_CEREAL
#include <cereal/archives/json.hpp>
#include <cereal/types/vector.hpp>
#include <cereal/types/array.hpp>
#endif
#include "bb/Manager.h"
#include "bb/DataType.h"
#include "bb/Model.h"
#include "bb/Activation.h"
#include "bb/FrameBuffer.h"
#include "bb/SimdSupport.h"
#ifdef BB_WITH_CUDA
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#endif
namespace bb {
// BatchNormalization layer: normalizes each node over the frame (batch)
// dimension, then applies a learnable per-node scale (gamma) and shift (beta).
// Keeps running mean/variance for inference, and a stack of per-batch
// mean/rstd so multiple Forward calls can be paired with Backward calls.
template <typename T = float>
class BatchNormalization : public Activation
{
    using _super = Activation;

public:
    static inline std::string ModelName(void) { return "BatchNormalization"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<T>::Name(); }

    std::string GetModelName(void) const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    bool m_bypass = false;      // if true, Forward/Backward pass buffers through unchanged
    bool m_host_only = false;   // force host (CPU) execution even when CUDA is available
    bool m_host_simd = false;   // use the AVX/FMA host path (fp32 only)
    bool m_fix_gamma = false;   // exclude gamma from trainable parameters/gradients
    bool m_fix_beta = false;    // exclude beta from trainable parameters/gradients

    std::shared_ptr<Tensor> m_gamma;
    std::shared_ptr<Tensor> m_beta;
    std::shared_ptr<Tensor> m_dgamma;
    std::shared_ptr<Tensor> m_dbeta;

    std::stack< Tensor_<T> > m_stack_mean; // per-batch mean (pushed by Forward, popped by Backward)
    std::stack< Tensor_<T> > m_stack_rstd; // per-batch reciprocal of the standard deviation
    Tensor_<T> m_running_mean;
    Tensor_<T> m_running_var;

    T m_momentum = (T)0.9;  // EMA coefficient for running mean/var
    T m_init_gamma;
    T m_init_beta;

public:
    // construction parameters
    struct create_t
    {
        T momentum = (T)0.9;
        T gamma = (T)1.0;
        T beta = (T)0.0;
        bool fix_gamma = false;
        bool fix_beta = false;
    };

protected:
    BatchNormalization(create_t const &create)
    {
        m_gamma = std::make_shared<Tensor>();
        m_beta = std::make_shared<Tensor>();
        m_dgamma = std::make_shared<Tensor>();
        m_dbeta = std::make_shared<Tensor>();

        m_momentum = create.momentum;
        m_init_gamma = create.gamma;
        m_init_beta = create.beta;
        m_fix_gamma = create.fix_gamma;
        m_fix_beta = create.fix_beta;
    }

    // Runtime configuration via string commands; unrecognized commands are ignored.
    void CommandProc(std::vector<std::string> args) override
    {
        _super::CommandProc(args);

        // bypass mode setting
        if (args.size() == 2 && args[0] == "bypass")
        {
            m_bypass = EvalBool(args[1]);
        }

        // HostOnly mode setting
        if (args.size() == 2 && args[0] == "host_only")
        {
            m_host_only = EvalBool(args[1]);
        }

        // Host SIMD mode setting
        if (args.size() == 2 && args[0] == "host_simd")
        {
            m_host_simd = EvalBool(args[1]);
        }

        if (args.size() == 2 && args[0] == "set_momentum") {
            m_momentum = (T)std::atof(args[1].c_str());
        }

        if (args.size() == 2 && args[0] == "fix_gamma") {
            m_fix_gamma = EvalBool(args[1]);
        }

        if (args.size() == 2 && args[0] == "fix_beta") {
            m_fix_beta = EvalBool(args[1]);
        }

        if (args.size() == 2 && args[0] == "set_gamma") {
            *m_gamma = (T)std::atof(args[1].c_str());
        }

        if (args.size() == 2 && args[0] == "set_beta") {
            *m_beta = (T)std::atof(args[1].c_str());
        }
    }

    void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
    {
        _super::PrintInfoText(os, indent, columns, nest, depth);
        os << indent << " momentum : " << m_momentum << std::endl;
    }

public:
    ~BatchNormalization() {}

    static std::shared_ptr<BatchNormalization> Create(create_t const &create)
    {
        return std::shared_ptr<BatchNormalization>(new BatchNormalization(create));
    }

    static std::shared_ptr<BatchNormalization> Create(T momentum = (T)0.9, T gamma=(T)1.0, T beta=(T)0.0)
    {
        create_t create;
        create.momentum = momentum;
        create.gamma = gamma;
        create.beta = beta;
        return Create(create);
    }

#ifdef BB_PYBIND11   // for python
    static std::shared_ptr<BatchNormalization> CreatePy(
                T momentum = (T)0.9,
                T gamma = (T)1.0,
                T beta = (T)0.0,
                bool fix_gamma = false,
                bool fix_beta = false
        )
    {
        create_t create;
        create.momentum = momentum;
        create.gamma = gamma;
        create.beta = beta;
        create.fix_gamma = fix_gamma;
        create.fix_beta = fix_beta;
        return Create(create);
    }
#endif

protected:
    // Binary serialization of all persistent state (version 1).
    void DumpObjectData(std::ostream &os) const override
    {
        // version
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_bypass);
        bb::SaveValue(os, m_host_only);
        bb::SaveValue(os, m_host_simd);
        bb::SaveValue(os, m_fix_gamma);
        bb::SaveValue(os, m_fix_beta);
        bb::SaveValue(os, m_momentum);
        bb::SaveValue(os, m_init_gamma);
        bb::SaveValue(os, m_init_beta);
        m_gamma->DumpObject(os);
        m_beta->DumpObject(os);
        m_running_mean.DumpObject(os);
        m_running_var.DumpObject(os);
    }

    void LoadObjectData(std::istream &is) override
    {
        // version
        std::int64_t ver;
        bb::LoadValue(is, ver);
        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members
        bb::LoadValue(is, m_bypass);
        bb::LoadValue(is, m_host_only);
        bb::LoadValue(is, m_host_simd);
        bb::LoadValue(is, m_fix_gamma);
        bb::LoadValue(is, m_fix_beta);
        bb::LoadValue(is, m_momentum);
        bb::LoadValue(is, m_init_gamma);
        bb::LoadValue(is, m_init_beta);
        m_gamma->LoadObject(is);
        m_beta->LoadObject(is);
        m_running_mean.LoadObject(is);
        m_running_var.LoadObject(is);

        // rebuild gradient buffers (not serialized; recreated zero-filled)
        auto node_size = CalcShapeSize(_super::m_shape);
        m_dgamma->Resize({node_size}, DataType<T>::type); *m_dgamma = (T)0.0;
        m_dbeta->Resize ({node_size}, DataType<T>::type); *m_dbeta = (T)0.0;
    }

public:
    // Serialize (legacy format)
    void Save(std::ostream &os) const override
    {
        SaveIndices(os, _super::m_shape);
        bb::SaveValue(os, m_momentum);
        m_gamma->Save(os);
        m_beta->Save(os);
        m_running_mean.Save(os);
        m_running_var.Save(os);
    }

    void Load(std::istream &is) override
    {
        this->m_shape = LoadIndices(is);
        bb::LoadValue(is, m_momentum);
        m_gamma->Load(is);
        m_beta->Load(is);
        m_running_mean.Load(is);
        m_running_var.Load(is);
        // rebuild gradient buffers to match the loaded shape
        auto node_size = CalcShapeSize(_super::m_shape);
        m_dgamma->Resize({node_size}, DataType<T>::type); *m_dgamma = (T)0.0;
        m_dbeta->Resize ({node_size}, DataType<T>::type); *m_dbeta = (T)0.0;
    }

#ifdef BB_WITH_CEREAL
    template <class Archive>
    void save(Archive& archive, std::uint32_t const version) const
    {
        _super::save(archive, version);
        archive(cereal::make_nvp("shape", _super::m_shape));
        archive(cereal::make_nvp("gamma", *m_gamma));
        archive(cereal::make_nvp("beta", *m_beta));
        archive(cereal::make_nvp("running_mean", m_running_mean));
        archive(cereal::make_nvp("running_var", m_running_var));
    }

    template <class Archive>
    void load(Archive& archive, std::uint32_t const version)
    {
        _super::load(archive, version);
        archive(cereal::make_nvp("shape", _super::m_shape));
        archive(cereal::make_nvp("gamma", *m_gamma));
        archive(cereal::make_nvp("beta", *m_beta));
        archive(cereal::make_nvp("running_mean", m_running_mean));
        archive(cereal::make_nvp("running_var", m_running_var));
    }

    void Save(cereal::JSONOutputArchive& archive) const
    {
        archive(cereal::make_nvp("BatchNormalization", *this));
    }

    void Load(cereal::JSONInputArchive& archive)
    {
        archive(cereal::make_nvp("BatchNormalization", *this));
    }
#endif

    // parameter / gradient accessors
    Tensor       &gamma(void)        { return *m_gamma; }
    Tensor const &gamma(void)  const { return *m_gamma; }
    Tensor       &beta(void)         { return *m_beta; }
    Tensor const &beta(void)   const { return *m_beta; }
    Tensor       &dgamma(void)       { return *m_dgamma; }
    Tensor const &dgamma(void) const { return *m_dgamma; }
    Tensor       &dbeta(void)        { return *m_dbeta; }
    Tensor const &dbeta(void)  const { return *m_dbeta; }
    // mean/rstd of the most recent (not yet back-propagated) Forward call
    Tensor       mean(void)          { return m_stack_mean.top(); }
    Tensor       rstd(void)          { return m_stack_rstd.top(); }
    Tensor       running_mean(void)  { return m_running_mean; }
    Tensor       running_var(void)   { return m_running_var; }

    auto lock_gamma(void)              { return m_gamma->Lock<T>(); }
    auto lock_gamma_const(void)  const { return m_gamma->LockConst<T>(); }
    auto lock_beta(void)               { return m_beta->Lock<T>(); }
    auto lock_beta_const(void)   const { return m_beta->LockConst<T>(); }
    auto lock_dgamma(void)             { return m_dgamma->Lock<T>(); }
    auto lock_dgamma_const(void) const { return m_dgamma->LockConst<T>(); }
    auto lock_dbeta(void)              { return m_dbeta->Lock<T>(); }
    auto lock_dbeta_const(void)  const { return m_dbeta->LockConst<T>(); }

    auto lock_running_mean(void)             { return m_running_mean.Lock(); }
    auto lock_running_mean_const(void) const { return m_running_mean.LockConst(); }
    auto lock_running_var(void)              { return m_running_var.Lock(); }
    auto lock_running_var_const(void)  const { return m_running_var.LockConst(); }

    // debug
    auto lock_tmp_mean_const(void) const { return m_stack_mean.top().LockConst(); }
    auto lock_tmp_rstd_const(void) const { return m_stack_rstd.top().LockConst(); }

    /**
     * @brief  set input shape
     * @detail Sets the input shape and (re)initializes the internal
     *         parameter tensors; after this call GetOutputShape() is valid.
     *         NOTE(review): the original doc said internal variables are
     *         re-initialized even for an identical shape, but the code below
     *         returns early in that case — confirm which behavior is intended.
     *         NOTE(review): other virtuals in this class are marked override;
     *         confirm whether SetInputShape should be marked override as well.
     * @param  shape shape of the nodes forming one frame
     * @return output shape
     */
    indices_t SetInputShape(indices_t shape)
    {
        // do nothing if already configured with this shape
        if ( shape == this->GetInputShape() ) {
            return this->GetOutputShape();
        }

        _super::SetInputShape(shape);
        auto node_size = CalcShapeSize(shape);

        // initialize parameters
        m_gamma->Resize ({node_size}, DataType<T>::type); *m_gamma = m_init_gamma;
        m_beta->Resize  ({node_size}, DataType<T>::type); *m_beta = m_init_beta;
        m_dgamma->Resize({node_size}, DataType<T>::type); *m_dgamma = (T)0.0;
        m_dbeta->Resize ({node_size}, DataType<T>::type); *m_dbeta = (T)0.0;

        m_running_mean.Resize(node_size); m_running_mean = (T)0.0;
        m_running_var.Resize(node_size);  m_running_var = (T)1.0;

        return shape;
    }

public:
    /**
     * @brief  get parameters
     * @detail Returns the trainable parameters (gamma/beta unless fixed),
     *         intended for use by an Optimizer.
     * @return parameters
     */
    Variables GetParameters(void) override
    {
        Variables parameters;
        if ( !this->m_parameter_lock ) {
            if ( !m_fix_gamma ) { parameters.PushBack(m_gamma); }
            if ( !m_fix_beta )  { parameters.PushBack(m_beta); }
        }
        return parameters;
    }

    /**
     * @brief  get gradients
     * @detail Returns the gradients matching GetParameters(),
     *         intended for use by an Optimizer.
     * @return gradients
     */
    Variables GetGradients(void) override
    {
        Variables gradients;
        if ( !this->m_parameter_lock ) {
            if ( !m_fix_gamma ) { gradients.PushBack(m_dgamma); }
            if ( !m_fix_beta )  { gradients.PushBack(m_dbeta); }
        }
        return gradients;
    }

    // per-node Forward computation (inference semantics: uses running stats)
    std::vector<double> ForwardNode(index_t node, std::vector<double> x_vec) const override
    {
        BB_DEBUG_ASSERT(node >= 0 && node < CalcShapeSize(_super::GetOutputShape()));

        auto gamma_ptr        = lock_gamma_const();
        auto beta_ptr         = lock_beta_const();
        auto running_mean_ptr = m_running_mean.LockConst();
        auto running_var_ptr  = m_running_var.LockConst();

        std::vector<double> y_vec(x_vec.size());
        for (size_t i = 0; i < x_vec.size(); ++i) {
            y_vec[i] = x_vec[i];
            y_vec[i] -= (double)running_mean_ptr(node);
            y_vec[i] /= sqrt((double)running_var_ptr(node)+ 1.0e-7);
            y_vec[i] = y_vec[i] * (double)gamma_ptr(node) + (double)beta_ptr(node);
        }
        return y_vec;
    }

    void Clear(void) override
    {
        _super::Clear();
        while (!m_stack_mean.empty()) {
            m_stack_mean.pop();
        }
        while (!m_stack_rstd.empty()) {
            m_stack_rstd.pop();
        }
    }

    // clear buffers, but keep the mean and rstd stacks
    void ClearBuffer(void) {
        _super::Clear();
    }

    /**
     * @brief  forward pass
     * @detail Performs the forward computation.
     * @param  x     input data
     * @param  train true during training (uses batch stats, records state for backward)
     * @return forward result
     */
    FrameBuffer Forward(FrameBuffer x_buf, bool train=true) override
    {
        bool update_running_param = true;
        if ( this->m_parameter_lock ) {
            train = false;
        }

        // bypass
        if (m_bypass) {
            return x_buf;
        }

        SetInputShape(x_buf.GetShape());

        // allocate output
        FrameBuffer y_buf(x_buf.GetFrameSize(), x_buf.GetShape(), x_buf.GetType());

        auto node_size = CalcShapeSize(this->m_shape);
        Tensor_<T> mean_tensor;  // mean
        Tensor_<T> rstd_tensor;  // reciprocal of standard deviation
        mean_tensor.Resize(node_size);
        rstd_tensor.Resize(node_size);

        // save for backward
        if ( train ) {
            PushFrameBuffer(x_buf);
            m_stack_mean.push(mean_tensor);
            m_stack_rstd.push(rstd_tensor);
        }

#ifdef BB_WITH_CUDA
        if ( DataType<T>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            if ( train ) {
                auto dev_x_ptr            = x_buf.LockDeviceMemoryConst();
                auto dev_y_ptr            = y_buf.LockDeviceMemory(true);
                auto dev_gamma_ptr        = m_gamma->LockDeviceMemoryConst();
                auto dev_beta_ptr         = m_beta->LockDeviceMemoryConst();
                auto dev_mean_ptr         = mean_tensor.LockDeviceMemory(true);
                auto dev_rstd_ptr         = rstd_tensor.LockDeviceMemory(true);
                auto dev_running_mean_ptr = m_running_mean.LockDeviceMemory();
                auto dev_running_var_ptr  = m_running_var.LockDeviceMemory();
                bbcu_fp32_BatchNormalization_ForwardTraining
                    (
                        (float const *)dev_x_ptr.GetAddr(),
                        (float       *)dev_y_ptr.GetAddr(),
                        (float const *)dev_gamma_ptr.GetAddr(),
                        (float const *)dev_beta_ptr.GetAddr(),
                        (float       *)dev_mean_ptr.GetAddr(),
                        (float       *)dev_rstd_ptr.GetAddr(),
                        (float       *)dev_running_mean_ptr.GetAddr(),
                        (float       *)dev_running_var_ptr.GetAddr(),
                        (float        )m_momentum,
                        (int          )x_buf.GetNodeSize(),
                        (int          )x_buf.GetFrameSize(),
                        (int          )x_buf.GetFrameStride() / sizeof(float),
                        (bool         )update_running_param
                    );
                return y_buf;
            }
            else {
                auto dev_x_ptr            = x_buf.LockDeviceMemoryConst();
                auto dev_y_ptr            = y_buf.LockDeviceMemory(true);
                auto dev_gamma_ptr        = m_gamma->LockDeviceMemoryConst();
                auto dev_beta_ptr         = m_beta->LockDeviceMemoryConst();
                auto dev_running_mean_ptr = m_running_mean.LockDeviceMemoryConst();
                auto dev_running_var_ptr  = m_running_var.LockDeviceMemoryConst();
                bbcu_fp32_BatchNormalization_ForwardInference
                    (
                        (float const *)dev_x_ptr.GetAddr(),
                        (float       *)dev_y_ptr.GetAddr(),
                        (float const *)dev_gamma_ptr.GetAddr(),
                        (float const *)dev_beta_ptr.GetAddr(),
                        (float       *)dev_running_mean_ptr.GetAddr(),
                        (float       *)dev_running_var_ptr.GetAddr(),
                        (int          )x_buf.GetNodeSize(),
                        (int          )x_buf.GetFrameSize(),
                        (int          )x_buf.GetFrameStride() / sizeof(float)
                    );
                return y_buf;
            }
        }
#endif

        if ( DataType<T>::type == BB_TYPE_FP32 && m_host_simd ) {
            // SIMD version (AVX/FMA, fp32 only)
            auto node_size = x_buf.GetNodeSize();
            auto frame_size = x_buf.GetFrameSize();
            // auto frame_stride = x_buf.GetFrameStride() / sizeof(float);
            const int mm256_frame_size = ((int)frame_size + 7) / 8 * 8;

            auto x_ptr = x_buf.LockConst<T>();
            auto y_ptr = y_buf.Lock<T>();

            auto gamma_ptr = lock_gamma_const();
            auto beta_ptr = lock_beta_const();
            auto mean_ptr = mean_tensor.Lock(true);
            auto rstd_ptr = rstd_tensor.Lock(true);
            auto running_mean_ptr = m_running_mean.Lock();
            auto running_var_ptr = m_running_var.Lock();

            if (train) {
                const __m256 reciprocal_frame_size = _mm256_set1_ps(1.0f / (float)frame_size);
                const __m256 epsilon = _mm256_set1_ps(1.0e-7f);

                #pragma omp parallel for
                for (int node = 0; node < (int)node_size; ++node) {
                    float const *x_addr = x_ptr.GetAddr(node);
                    float *y_addr = y_ptr.GetAddr(node);

                    // compute mean and variance (Kahan-compensated sums of x and x^2)
                    __m256 mean_sum = _mm256_set1_ps(0.0f);
                    __m256 mean_c = _mm256_set1_ps(0.0f);
                    __m256 var_sum = _mm256_set1_ps(0.0f);
                    __m256 var_c = _mm256_set1_ps(0.0f);
                    for ( int frame = 0; frame < mm256_frame_size; frame += 8) {
                        __m256 x = _mm256_load_ps(&x_addr[frame + 0]);
                        __m256 mean_y = _mm256_sub_ps(x, mean_c);
                        __m256 mean_t = _mm256_add_ps(mean_sum, mean_y);
                        mean_c = _mm256_sub_ps(_mm256_sub_ps(mean_t, mean_sum), mean_y);
                        mean_sum = mean_t;
                        __m256 var_y = _mm256_fmsub_ps(x, x, var_c);
                        __m256 var_t = _mm256_add_ps(var_sum, var_y);
                        var_c = _mm256_sub_ps(_mm256_sub_ps(var_t, var_sum), var_y);
                        var_sum = var_t;
                    }
                    __m256 mean = _mm256_mul_ps(bb_mm256_hsum_ps(mean_sum), reciprocal_frame_size);
                    __m256 var = _mm256_fmsub_ps(bb_mm256_hsum_ps(var_sum), reciprocal_frame_size, _mm256_mul_ps(mean, mean));
                    var = _mm256_max_ps(var, _mm256_set1_ps(0.0f)); // clip: guard against a slightly negative variance from rounding error
                    __m256 varx = _mm256_max_ps(var, epsilon);
                    __m256 rstd = _mm256_rsqrt_ps(varx);
                    // refine the approximate rsqrt with two Newton-Raphson steps
                    // __m256 half = _mm256_mul_ps(varx, _mm256_set1_ps(0.5f));
                    // rstd = _mm256_mul_ps(rstd, _mm256_sub_ps(_mm256_set1_ps(1.5f), _mm256_mul_ps(_mm256_mul_ps(half, rstd), rstd)));
                    // rstd = _mm256_mul_ps(rstd, _mm256_sub_ps(_mm256_set1_ps(1.5f), _mm256_mul_ps(_mm256_mul_ps(half, rstd), rstd)));
                    varx = _mm256_mul_ps(varx, _mm256_set1_ps(0.5f));
                    rstd = _mm256_mul_ps(rstd, _mm256_fnmadd_ps(varx, _mm256_mul_ps(rstd, rstd), _mm256_set1_ps(1.5f)));
                    rstd = _mm256_mul_ps(rstd, _mm256_fnmadd_ps(varx, _mm256_mul_ps(rstd, rstd), _mm256_set1_ps(1.5f)));

                    // update running mean and var
                    // NOTE(review): this path accumulates the biased variance into
                    // running_var, while the generic path below uses the unbiased
                    // estimate (var1) — confirm which is intended.
                    if ( update_running_param ) {
                        running_mean_ptr[node] = running_mean_ptr[node] * m_momentum + bb_mm256_cvtss_f32(mean) * (1.0f - m_momentum);
                        running_var_ptr[node] = running_var_ptr[node] * m_momentum + bb_mm256_cvtss_f32(var) * (1.0f - m_momentum);
                    }

                    // store batch statistics for backward
                    mean_ptr[node] = bb_mm256_cvtss_f32(mean);
                    rstd_ptr[node] = bb_mm256_cvtss_f32(rstd);

                    // normalize and apply gamma/beta
                    __m256 gamma = _mm256_set1_ps(gamma_ptr[node]);
                    __m256 beta = _mm256_set1_ps(beta_ptr[node]);
                    // for (int frame = 0; frame < mm256_frame_size; frame += 8) {
                    for (int frame = mm256_frame_size-8; frame >= 0; frame -= 8) {
                        __m256 x = _mm256_load_ps(&x_addr[frame]);
                        __m256 xn = _mm256_mul_ps(_mm256_sub_ps(x, mean), rstd);
                        __m256 y = _mm256_fmadd_ps(xn, gamma, beta);
                        _mm256_store_ps(&y_addr[frame], y);
                    }
                }
            }
            else {
                #pragma omp parallel for
                for (int node = 0; node < (int)node_size; ++node) {
                    auto x_addr = x_ptr.GetAddr(node);
                    auto y_addr = y_ptr.GetAddr(node);
                    __m256 running_mean = _mm256_set1_ps(running_mean_ptr[node]);
                    // NOTE(review): epsilon is added AFTER the sqrt here, but
                    // INSIDE the sqrt in the generic inference path — confirm.
                    __m256 running_var = _mm256_set1_ps(1.0f / (sqrt(running_var_ptr[node]) + 1.0e-7f));
                    __m256 gamma = _mm256_set1_ps(gamma_ptr[node]);
                    __m256 beta = _mm256_set1_ps(beta_ptr[node]);
                    for (int frame = 0; frame < mm256_frame_size; frame += 8) {
                        __m256 x = _mm256_load_ps(&x_addr[frame]);
                        __m256 xc = _mm256_sub_ps(x, running_mean);
                        __m256 xn = _mm256_mul_ps(xc, running_var);
                        __m256 y = _mm256_fmadd_ps(xn, gamma, beta);
                        _mm256_store_ps(&y_addr[frame], y);
                    }
                }
            }

            return y_buf;
        }

        {
            // generic version
            auto node_size = x_buf.GetNodeSize();
            auto frame_size = x_buf.GetFrameSize();

            auto x_ptr = x_buf.LockConst<T>();
            auto y_ptr = y_buf.Lock<T>();

            auto gamma_ptr = lock_gamma_const();
            auto beta_ptr = lock_beta_const();
            auto mean_ptr = mean_tensor.Lock(true);
            auto rstd_ptr = rstd_tensor.Lock(true);
            auto running_mean_ptr = m_running_mean.Lock();
            auto running_var_ptr = m_running_var.Lock();

            if ( train ) {
                #pragma omp parallel for
                for (index_t node = 0; node < node_size; ++node) {
                    // Kahan summation algorithm [uncertain benefit, but the loop
                    // is likely memory-bound anyway, so the extra ops are cheap]
                    T s1 = 0, c1 = 0, y1, t1;
                    T s2 = 0, c2 = 0, y2, t2;
                    for ( index_t frame = 0; frame < frame_size; ++frame) {
                        T x = x_ptr.Get(frame, node);
                        y1 = x - c1;
                        t1 = s1 + y1;
                        c1 = (t1 - s1) - y1;
                        s1 = t1;
                        y2 = (x * x) - c2;
                        t2 = s2 + y2;
                        c2 = (t2 - s2) - y2;
                        s2 = t2;
                    }

                    // batch statistics: var1 is the unbiased estimate (n-1),
                    // var is the biased estimate (n) used for normalization
                    T mean = s1 / (T)frame_size;
                    T var1 = (s2 - (mean * mean)*(T)frame_size) / (T)(frame_size - 1);
                    T var = (s2 - (mean * mean)*(T)frame_size) / (T)frame_size;
                    var = std::max((T)0, var); // clip: guard against a slightly negative variance from rounding error
                    T std = std::sqrt(var + (T)1.0e-7);
                    T rstd = (T)1.0 / std;

                    if ( update_running_param ) {
                        running_mean_ptr[node] = (running_mean_ptr[node] * m_momentum) + (mean * ((T)1.0 - m_momentum));
                        running_var_ptr[node] = (running_var_ptr[node] * m_momentum) + (var1 * ((T)1.0 - m_momentum));
                    }

                    mean_ptr[node] = mean;
                    rstd_ptr[node] = rstd;

                    // normalize
                    T gamma = gamma_ptr[node];
                    T beta = beta_ptr[node];
                    for ( index_t frame = 0; frame < frame_size; ++frame) {
                        T x = x_ptr.Get(frame, node);
                        x = (x - mean) * rstd;
                        x = x * gamma + beta;
                        y_ptr.Set(frame, node, x);
                    }
                }
            }
            else {
                #pragma omp parallel for
                for (index_t node = 0; node < node_size; ++node) {
                    T gamma = gamma_ptr[node];
                    T beta = beta_ptr[node];
                    T mean = running_mean_ptr[node];
                    T var = running_var_ptr[node];
                    T rstd = (T)1.0 / std::sqrt(var + (T)1.0e-7);
                    for ( index_t frame = 0; frame < frame_size; ++frame) {
                        T x = x_ptr.Get(frame, node);
                        y_ptr.Set(frame, node, ((x - mean) * rstd) * gamma + beta);
                    }
                }
            }

            return y_buf;
        }
    }

    // forward recomputation: replay Forward using the mean/rstd already on the
    // stack (does NOT recompute statistics or touch running mean/var)
    FrameBuffer ReForward(FrameBuffer x_buf) override
    {
        // bypass
        if (m_bypass) {
            return x_buf;
        }

        // allocate output
        FrameBuffer y_buf(x_buf.GetFrameSize(), x_buf.GetShape(), x_buf.GetType());

        // auto node_size = CalcShapeSize(this->m_shape);
        // Tensor_<T> mean_tensor;  // mean
        // Tensor_<T> rstd_tensor;  // reciprocal of standard deviation
        // mean_tensor.Resize(node_size);
        // rstd_tensor.Resize(node_size);

        // save for backward
        PushFrameBuffer(x_buf);
        // m_stack_mean.push(mean_tensor);
        // m_stack_rstd.push(rstd_tensor);
        BB_ASSERT(!m_stack_mean.empty()); auto mean_tensor = m_stack_mean.top();
        BB_ASSERT(!m_stack_rstd.empty()); auto rstd_tensor = m_stack_rstd.top();

#ifdef BB_WITH_CUDA
        if ( DataType<T>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // CUDA version
            auto dev_x_ptr = x_buf.LockDeviceMemoryConst();
            auto dev_y_ptr = y_buf.LockDeviceMemory(true);
            auto dev_gamma_ptr = m_gamma->LockDeviceMemoryConst();
            auto dev_beta_ptr = m_beta->LockDeviceMemoryConst();
            auto dev_mean_ptr = mean_tensor.LockDeviceMemoryConst();
            auto dev_rstd_ptr = rstd_tensor.LockDeviceMemoryConst();
            bbcu_fp32_BatchNormalization_ReForward
                (
                    (float const *)dev_x_ptr.GetAddr(),
                    (float       *)dev_y_ptr.GetAddr(),
                    (float const *)dev_gamma_ptr.GetAddr(),
                    (float const *)dev_beta_ptr.GetAddr(),
                    (float       *)dev_mean_ptr.GetAddr(),
                    (float       *)dev_rstd_ptr.GetAddr(),
                    (int          )x_buf.GetNodeSize(),
                    (int          )x_buf.GetFrameSize(),
                    (int          )x_buf.GetFrameStride() / sizeof(float)
                );
            return y_buf;
        }
#endif

        {
            // generic version
            auto node_size = x_buf.GetNodeSize();
            auto frame_size = x_buf.GetFrameSize();

            auto x_ptr = x_buf.LockConst<T>();
            auto y_ptr = y_buf.Lock<T>();

            auto gamma_ptr = lock_gamma_const();
            auto beta_ptr = lock_beta_const();
            auto mean_ptr = mean_tensor.Lock(true);
            auto rstd_ptr = rstd_tensor.Lock(true);
            auto running_mean_ptr = m_running_mean.Lock();
            auto running_var_ptr = m_running_var.Lock();

            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                // saved batch statistics
                T mean = mean_ptr[node];
                T rstd = rstd_ptr[node];

                // normalize
                T gamma = gamma_ptr[node];
                T beta = beta_ptr[node];
                for ( index_t frame = 0; frame < frame_size; ++frame) {
                    T x = x_ptr.Get(frame, node);
                    x = (x - mean) * rstd;
                    x = x * gamma + beta;
                    y_ptr.Set(frame, node, x);
                }
            }

            return y_buf;
        }
    }

    /**
     * @brief  backward pass
     * @detail Performs the backward computation; accumulates dgamma/dbeta
     *         and returns the gradient with respect to the input.
     * @return backward result
     */
    FrameBuffer Backward(FrameBuffer dy_buf) override
    {
        if ( this->m_parameter_lock ) {
            return BackwardLock(dy_buf);
        }

        /*
        if (dy_buf.Empty()) {
            m_dgamma = 0;
            m_dbeta = 0;
            return dy_buf;
        }
        */

        if (m_bypass) {
            return dy_buf;
        }

        // retrieve the x saved during Forward
        FrameBuffer x_buf = PopFrameBuffer();
        BB_ASSERT(dy_buf.GetFrameSize() == x_buf.GetFrameSize());
        BB_ASSERT(dy_buf.GetNodeSize() == x_buf.GetNodeSize());

        BB_ASSERT(!m_stack_mean.empty()); auto mean_tensor = m_stack_mean.top(); m_stack_mean.pop();
        BB_ASSERT(!m_stack_rstd.empty()); auto rstd_tensor = m_stack_rstd.top(); m_stack_rstd.pop();

        // allocate output
        FrameBuffer dx_buf(dy_buf.GetFrameSize(), dy_buf.GetShape(), dy_buf.GetType());

#ifdef BB_WITH_CUDA
        if ( DataType<T>::type == BB_TYPE_FP32 && !m_host_only && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            auto dev_x_ptr = x_buf.LockDeviceMemoryConst();
            auto dev_dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto dev_dx_ptr = dx_buf.LockDeviceMemory(true);
            auto dev_gamma_ptr = m_gamma->LockDeviceMemoryConst();
            auto dev_dgamma_ptr = m_dgamma->LockDeviceMemory();
            auto dev_dbeta_ptr = m_dbeta->LockDeviceMemory();
            auto dev_mean_ptr = mean_tensor.LockDeviceMemoryConst();
            auto dev_rstd_ptr = rstd_tensor.LockDeviceMemoryConst();
            bbcu_fp32_BatchNormalization_Backward
                (
                    (const float *)dev_x_ptr.GetAddr(),
                    (const float *)dev_dy_ptr.GetAddr(),
                    (float       *)dev_dx_ptr.GetAddr(),
                    (float const *)dev_gamma_ptr.GetAddr(),
                    (float       *)dev_dgamma_ptr.GetAddr(),
                    (float       *)dev_dbeta_ptr.GetAddr(),
                    (float const *)dev_mean_ptr.GetAddr(),
                    (float const *)dev_rstd_ptr.GetAddr(),
                    (float        )1.0f / (float)dy_buf.GetFrameSize(),
                    (int          )dy_buf.GetNodeSize(),
                    (int          )dy_buf.GetFrameSize(),
                    (int          )dy_buf.GetFrameStride() / sizeof(float)
                );
            return dx_buf;
        }
#endif

        if ( DataType<T>::type == BB_TYPE_FP32 && m_host_simd ) {
            // SIMD version (AVX/FMA, fp32 only)
            auto node_size = dy_buf.GetNodeSize();
            auto frame_size = dy_buf.GetFrameSize();
            // auto frame_stride = dy_buf.GetFrameStride() / sizeof(float);
            const int mm256_frame_size = ((int)frame_size + 7) / 8 * 8;

            auto gamma_ptr = lock_gamma_const();
            // auto beta_ptr = lock_beta_const();
            auto dgamma_ptr = lock_dgamma();
            auto dbeta_ptr = lock_dbeta();

            auto mean_ptr = mean_tensor.LockConst();
            auto rstd_ptr = rstd_tensor.LockConst();

            // precompute 1 / frame_size
            const __m256 reciprocal_frame_size = _mm256_set1_ps(1.0f / (float)frame_size);

            auto x_ptr = x_buf.LockConst<T>();
            // auto y_ptr = y_buf.LockConst<T>();
            auto dx_ptr = dx_buf.Lock<T>();
            auto dy_ptr = dy_buf.LockConst<T>();

            #pragma omp parallel for
            for (int node = 0; node < (int)node_size; ++node) {
                auto dy_addr = dy_ptr.GetAddr(node);
                auto dx_addr = dx_ptr.GetAddr(node);
                auto x_addr = x_ptr.GetAddr(node);

                __m256 mean = _mm256_set1_ps(mean_ptr[node]);
                __m256 rstd = _mm256_set1_ps(rstd_ptr[node]);
                __m256 gamma = _mm256_set1_ps(gamma_ptr[node]);
                __m256 dbeta = _mm256_set1_ps(0);
                __m256 dgamma = _mm256_set1_ps(0);
                __m256 dstd = _mm256_set1_ps(0);
                __m256 dmeanx = _mm256_set1_ps(0);
                __m256 rstd2 = _mm256_mul_ps(rstd, rstd);

                // first pass: accumulate dbeta, dgamma, dstd, dmeanx
                for (int frame = 0; frame < mm256_frame_size; frame += 8) {
                    __m256 x = _mm256_load_ps(&x_addr[frame]);
                    __m256 xc = _mm256_sub_ps(x, mean);
                    __m256 xn = _mm256_mul_ps(xc, rstd);

                    __m256 dy = _mm256_load_ps(&dy_addr[frame]);
                    dbeta = _mm256_add_ps(dy, dbeta);
                    dgamma = _mm256_fmadd_ps(xn, dy, dgamma);

                    __m256 dxn = _mm256_mul_ps(dy, gamma);
                    dstd = _mm256_fnmadd_ps(_mm256_mul_ps(dxn, xc), rstd2, dstd);
                    dmeanx = _mm256_fnmadd_ps(dxn, rstd, dmeanx);
                }
                dbeta = bb_mm256_hsum_ps(dbeta);
                dgamma = bb_mm256_hsum_ps(dgamma);
                dgamma_ptr[node] += bb_mm256_cvtss_f32(dgamma);
                dbeta_ptr[node] += bb_mm256_cvtss_f32(dbeta);

                dstd = bb_mm256_hsum_ps(dstd);
                dmeanx = bb_mm256_hsum_ps(dmeanx);

                __m256 dvar = _mm256_mul_ps(dstd, rstd);
                __m256 dmean = _mm256_mul_ps(_mm256_fnmadd_ps(mean, dvar, dmeanx), reciprocal_frame_size);

                // second pass: compute dx
                // for (int frame = 0; frame < mm256_frame_size; frame += 8) {
                for (int frame = mm256_frame_size - 8; frame >= 0; frame -= 8) {
                    __m256 dy = _mm256_load_ps(&dy_addr[frame]);
                    __m256 x = _mm256_load_ps(&x_addr[frame]);
                    __m256 dxn = _mm256_mul_ps(dy, gamma);
                    __m256 dxc = _mm256_fmadd_ps(dxn, rstd, dmean);
                    __m256 dx = _mm256_fmadd_ps(_mm256_mul_ps(x, dvar), reciprocal_frame_size, dxc);
                    _mm256_store_ps(&dx_addr[frame], dx);
                }
            }

            return dx_buf;
        }

        {
            // generic version
            auto node_size = dy_buf.GetNodeSize();
            auto frame_size = dy_buf.GetFrameSize();
            // auto frame_stride = dy_buf.GetFrameStride() / sizeof(float);
            // const int mm256_frame_size = ((int)frame_size + 7) / 8 * 8;

            auto gamma_ptr = lock_gamma_const();
            // auto beta_ptr = lock_beta_const();
            auto dgamma_ptr = lock_dgamma();
            auto dbeta_ptr = lock_dbeta();

            auto mean_ptr = mean_tensor.LockConst();
            auto rstd_ptr = rstd_tensor.LockConst();

            auto x_ptr = x_buf.LockConst<T>();
            // auto y_ptr = y_buf.LockConst<T>();
            auto dx_ptr = dx_buf.Lock<T>();
            auto dy_ptr = dy_buf.LockConst<T>();

            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                T mean = mean_ptr[node];
                T rstd = rstd_ptr[node];
                T gamma = gamma_ptr[node];
                T dgamma = 0;
                T dbeta = 0;
                T dmeanx = 0;
                T dstd = 0;

                // first pass: accumulate dbeta, dgamma, dstd, dmeanx
                for ( index_t frame = 0; frame < frame_size; ++frame) {
                    T x = x_ptr.Get(frame, node);
                    T dy = dy_ptr.Get(frame, node);
                    T xc = x - mean;
                    T xn = xc * rstd;
                    dbeta += dy;
                    dgamma += xn * dy;

                    T dxn = gamma * dy;
                    dstd += -(dxn * xc * (rstd * rstd));
                    dmeanx += -(dxn * rstd);
                }
                dgamma_ptr[node] += dgamma;
                dbeta_ptr[node] += dbeta;

                T dvar = dstd * rstd;
                T dmean = (dmeanx - (mean * dvar)) / (T)frame_size;

                // second pass: compute dx
                for ( index_t frame = 0; frame < frame_size; ++frame) {
                    T dy = dy_ptr.Get(frame, node);
                    T x = x_ptr.Get(frame, node);
                    T dxn = dy * gamma;
                    T dxc = dxn * rstd;
                    T dx = dxc + dmean + (x * dvar / (T)frame_size);
                    dx_ptr.Set(frame, node, dx);
                }
            }

            return dx_buf;
        }
    }

    // Backward when parameters are locked: treat the layer as a fixed affine
    // transform using running statistics, so dx = dy * gamma / sqrt(var + eps)
    FrameBuffer BackwardLock(FrameBuffer dy_buf)
    {
        // allocate output
        FrameBuffer dx_buf(dy_buf.GetFrameSize(), dy_buf.GetShape(), dy_buf.GetType());

#ifdef BB_WITH_CUDA
        if ( DataType<T>::type == BB_TYPE_FP32 && !m_host_only && dy_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            auto dev_dy_ptr = dy_buf.LockDeviceMemoryConst();
            auto dev_dx_ptr = dx_buf.LockDeviceMemory(true);
            auto dev_gamma_ptr = m_gamma->LockDeviceMemoryConst();
            auto dev_running_var_ptr = m_running_var.LockDeviceMemoryConst();
            bbcu_fp32_BatchNormalization_BackwardLock
                (
                    (const float *)dev_dy_ptr.GetAddr(),
                    (float       *)dev_dx_ptr.GetAddr(),
                    (float const *)dev_gamma_ptr.GetAddr(),
                    (float const *)dev_running_var_ptr.GetAddr(),
                    (int          )dy_buf.GetNodeSize(),
                    (int          )dy_buf.GetFrameSize(),
                    (int          )dy_buf.GetFrameStride() / sizeof(float)
                );
            return dx_buf;
        }
#endif

        {
            // generic version
            auto node_size = dy_buf.GetNodeSize();
            auto frame_size = dy_buf.GetFrameSize();

            auto gamma_ptr = lock_gamma_const();
            auto running_var_ptr = m_running_var.Lock();

            auto dy_ptr = dy_buf.LockConst<T>();
            auto dx_ptr = dx_buf.Lock<T>();

            #pragma omp parallel for
            for (index_t node = 0; node < node_size; ++node) {
                T var = running_var_ptr[node];
                T gamma = gamma_ptr[node];
                T rstd = (T)1.0 / std::sqrt(var+ (T)1.0e-7);
                T coeff = gamma * rstd;
                for ( index_t frame = 0; frame < frame_size; ++frame) {
                    T dy = dy_ptr.Get(frame, node);
                    T dx = dy * coeff;
                    dx_ptr.Set(frame, node, dx);
                }
            }

            return dx_buf;
        }
    }
};
}
|
GB_unop__identity_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_uint8_int64
// op(A') function: GB_unop_tran__identity_uint8_int64

// C type:  uint8_t
// A type:  int64_t
// cast:    uint8_t cij = (uint8_t) aij
// unaryop: cij = aij

// type of A's entries
#define GB_ATYPE \
    int64_t

// type of C's entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// access entry p of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: the int64_t -> uint8_t cast counts as typecasting, so the
// memcpy fast path in the apply kernel below is disabled)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint8_t) Ax [p] for all entries: apply the IDENTITY unary op with
// a uint8_t <- int64_t typecast.  Auto-generated specialization; the generic
// fallback is used instead when GB_DISABLE is set at compile time.
GrB_Info GB_unop_apply__identity_uint8_int64
(
    uint8_t *Cx,                    // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                    // number of entries (or bitmap slots) in A
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time: tell the caller to use the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: one parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            // typecast int64_t -> uint8_t (value truncated modulo 256)
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots whose bitmap bit is not set (entry not present)
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while typecasting int64_t -> uint8_t and
// applying the IDENTITY op.  The actual loop lives in GB_unop_transpose.c,
// which is specialized through the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_uint8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // NOTE(review): presumably one workspace per task — confirm in GB_unop_transpose.c
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time: fall back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
LinkedCells.h | /**
* @file LinkedCells.h
*
* @author tchipevn
* @date 17.02.2018
*/
#pragma once
#include "autopas/cells/FullParticleCell.h"
#include "autopas/containers/CellBasedParticleContainer.h"
#include "autopas/containers/CellBlock3D.h"
#include "autopas/containers/CompatibleTraversals.h"
#include "autopas/containers/LoadEstimators.h"
#include "autopas/containers/cellPairTraversals/BalancedTraversal.h"
#include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h"
#include "autopas/iterators/ParticleIterator.h"
#include "autopas/iterators/RegionParticleIterator.h"
#include "autopas/options/DataLayoutOption.h"
#include "autopas/options/LoadEstimatorOption.h"
#include "autopas/particles/OwnershipState.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/ParticleCellHelpers.h"
#include "autopas/utils/StringUtils.h"
#include "autopas/utils/WrapOpenMP.h"
#include "autopas/utils/inBox.h"
namespace autopas {
/**
* LinkedCells class.
* This class uses a list of neighboring cells to store the particles.
* These cells dimensions are at least as large as the given cutoff radius,
* therefore short-range interactions only need to be calculated between
* particles in neighboring cells.
* @tparam Particle type of the Particle
*/
template <class Particle>
class LinkedCells : public CellBasedParticleContainer<FullParticleCell<Particle>> {
 public:
  /**
   * Type of the ParticleCell.
   */
  using ParticleCell = FullParticleCell<Particle>;

  /**
   * Type of the Particle.
   */
  using ParticleType = typename ParticleCell::ParticleType;

  /**
   * Constructor of the LinkedCells class
   * @param boxMin
   * @param boxMax
   * @param cutoff
   * @param skin
   * @param cellSizeFactor cell size factor relative to cutoff
   * @param loadEstimator the load estimation algorithm for balanced traversals.
   * By default all applicable traversals are allowed.
   */
  LinkedCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff,
              const double skin, const double cellSizeFactor = 1.0,
              LoadEstimatorOption loadEstimator = LoadEstimatorOption::squaredParticlesPerCell)
      : CellBasedParticleContainer<ParticleCell>(boxMin, boxMax, cutoff, skin),
        // cells must cover the full interaction length (cutoff + skin)
        _cellBlock(this->_cells, boxMin, boxMax, cutoff + skin, cellSizeFactor),
        _loadEstimator(loadEstimator) {}

  /**
   * @copydoc ParticleContainerInterface::getContainerType()
   */
  [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::linkedCells; }

  /**
   * @copydoc ParticleContainerInterface::getParticleCellTypeEnum()
   */
  [[nodiscard]] CellType getParticleCellTypeEnum() override { return CellType::FullParticleCell; }

  /**
   * @copydoc ParticleContainerInterface::addParticleImpl()
   */
  void addParticleImpl(const ParticleType &p) override {
    ParticleCell &cell = _cellBlock.getContainingCell(p.getR());
    cell.addParticle(p);
  }

  /**
   * @copydoc ParticleContainerInterface::addHaloParticleImpl()
   */
  void addHaloParticleImpl(const ParticleType &haloParticle) override {
    ParticleType pCopy = haloParticle;
    // force the copy into the halo state before inserting it
    pCopy.setOwnershipState(OwnershipState::halo);
    ParticleCell &cell = _cellBlock.getContainingCell(pCopy.getR());
    cell.addParticle(pCopy);
  }

  /**
   * @copydoc ParticleContainerInterface::updateHaloParticle()
   */
  bool updateHaloParticle(const ParticleType &haloParticle) override {
    ParticleType pCopy = haloParticle;
    pCopy.setOwnershipState(OwnershipState::halo);
    // the particle may have moved by up to the skin, so search all nearby halo cells
    auto cells = _cellBlock.getNearbyHaloCells(pCopy.getR(), this->getSkin());
    for (auto cellptr : cells) {
      bool updated = internal::checkParticleInCellAndUpdateByID(*cellptr, pCopy);
      if (updated) {
        return true;
      }
    }
    AutoPasLog(trace, "UpdateHaloParticle was not able to update particle: {}", pCopy.toString());
    return false;
  }

  /**
   * Deletes all halo particles by clearing the halo cells.
   */
  void deleteHaloParticles() override { _cellBlock.clearHaloCells(); }

  /**
   * LinkedCells has no neighbor lists, so rebuilding is a no-op.
   */
  void rebuildNeighborLists(TraversalInterface *traversal) override {
    // nothing to do.
  }

  /**
   * Generates the load estimation function depending on _loadEstimator.
   * @return load estimator function object.
   */
  BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() {
    switch (this->_loadEstimator) {
      case LoadEstimatorOption::squaredParticlesPerCell: {
        return [&](const std::array<unsigned long, 3> &cellsPerDimension,
                   const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) {
          return loadEstimators::squaredParticlesPerCell(this->_cells, cellsPerDimension, lowerCorner, upperCorner);
        };
      }
      case LoadEstimatorOption::none:
        [[fallthrough]];
      default: {
        // constant estimate: every region has identical load
        return
            [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner,
                const std::array<unsigned long, 3> &upperCorner) { return 1; };
      }
    }
  }

  /**
   * Executes the given traversal over all cell pairs.
   * Throws if the traversal type is not applicable to linked cells.
   * @param traversal the traversal to execute.
   */
  void iteratePairwise(TraversalInterface *traversal) override {
    // Check if traversal is allowed for this container and give it the data it needs.
    auto *traversalInterface = dynamic_cast<LCTraversalInterface<ParticleCell> *>(traversal);
    auto *cellPairTraversal = dynamic_cast<CellPairTraversal<ParticleCell> *>(traversal);
    if (auto *balancedTraversal = dynamic_cast<BalancedTraversal *>(traversal)) {
      balancedTraversal->setLoadEstimator(getLoadEstimatorFunction());
    }
    if (traversalInterface && cellPairTraversal) {
      cellPairTraversal->setCellsToTraverse(this->_cells);
    } else {
      autopas::utils::ExceptionHandler::exception(
          "Trying to use a traversal of wrong type in LinkedCells::iteratePairwise. TraversalID: {}",
          traversal->getTraversalType());
    }

    traversal->initTraversal();
    traversal->traverseParticlePairs();
    traversal->endTraversal();
  }

  /**
   * Deletes halo particles, moves particles into their correct cells, and
   * collects particles that left the box.
   * @return particles that are no longer inside the container's box.
   */
  [[nodiscard]] std::vector<ParticleType> updateContainer() override {
    this->deleteHaloParticles();
    std::vector<ParticleType> invalidParticles;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif  // AUTOPAS_OPENMP
    {
      // private for each thread!
      std::vector<ParticleType> myInvalidParticles, myInvalidNotOwnedParticles;
#ifdef AUTOPAS_OPENMP
#pragma omp for
#endif  // AUTOPAS_OPENMP
      for (size_t cellId = 0; cellId < this->getCells().size(); ++cellId) {
        // Delete dummy particles of each cell.
        this->getCells()[cellId].deleteDummyParticles();

        // if empty
        if (not this->getCells()[cellId].isNotEmpty()) continue;

        auto [cellLowerCorner, cellUpperCorner] = this->getCellBlock().getCellBoundingBox(cellId);

        for (auto &&pIter = this->getCells()[cellId].begin(); pIter.isValid(); ++pIter) {
          // if not in cell
          if (utils::notInBox(pIter->getR(), cellLowerCorner, cellUpperCorner)) {
            myInvalidParticles.push_back(*pIter);
            internal::deleteParticle(pIter);
          }
        }
      }
      // implicit barrier here
      // the barrier is needed because iterators are not threadsafe w.r.t. addParticle()

      // this loop is executed for every thread and thus parallel. Don't use #pragma omp for here!
      for (auto &&p : myInvalidParticles) {
        // if not in halo
        if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) {
          this->template addParticle<false>(p);
        } else {
          myInvalidNotOwnedParticles.push_back(p);
        }
      }
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
      {
        // merge private vectors to global one.
        invalidParticles.insert(invalidParticles.end(), myInvalidNotOwnedParticles.begin(),
                                myInvalidNotOwnedParticles.end());
      }
    }
    return invalidParticles;
  }

  /**
   * @copydoc ParticleContainerInterface::getTraversalSelectorInfo()
   */
  [[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override {
    return TraversalSelectorInfo(this->getCellBlock().getCellsPerDimensionWithHalo(), this->getInteractionLength(),
                                 this->getCellBlock().getCellLength(), 0);
  }

  /**
   * Returns a mutable iterator over all particles matching the given behavior.
   * @param behavior which ownership states to iterate over.
   */
  [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> begin(
      IteratorBehavior behavior = autopas::IteratorBehavior::ownedOrHalo) override {
    return ParticleIteratorWrapper<ParticleType, true>(new internal::ParticleIterator<ParticleType, ParticleCell, true>(
        &this->_cells, 0, &_cellBlock, behavior, nullptr));
  }

  /**
   * Returns a const iterator over all particles matching the given behavior.
   * @param behavior which ownership states to iterate over.
   */
  [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> begin(
      IteratorBehavior behavior = autopas::IteratorBehavior::ownedOrHalo) const override {
    return ParticleIteratorWrapper<ParticleType, false>(
        new internal::ParticleIterator<ParticleType, ParticleCell, false>(&this->_cells, 0, &_cellBlock, behavior,
                                                                          nullptr));
  }

  /**
   * Returns a mutable iterator over all particles inside the given box region.
   * The searched cell range is extended by the skin since particles may have
   * moved over cell borders since the last rebuild.
   */
  [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> getRegionIterator(const std::array<double, 3> &lowerCorner,
                                                                              const std::array<double, 3> &higherCorner,
                                                                              IteratorBehavior behavior) override {
    // We increase the search region by skin, as particles can move over cell borders.
    auto startIndex3D =
        this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin()));
    auto stopIndex3D =
        this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin()));

    size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) *
                                (stopIndex3D[2] - startIndex3D[2] + 1);
    std::vector<size_t> cellsOfInterest(numCellsOfInterest);

    int i = 0;
    for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) {
      for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) {
        for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) {
          cellsOfInterest[i++] =
              utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo());
        }
      }
    }

    return ParticleIteratorWrapper<ParticleType, true>(
        new internal::RegionParticleIterator<ParticleType, ParticleCell, true>(
            &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior, nullptr));
  }

  /**
   * Returns a const iterator over all particles inside the given box region.
   * @copydoc getRegionIterator()
   * @note const version
   */
  [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> getRegionIterator(
      const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
      IteratorBehavior behavior) const override {
    // We increase the search region by skin, as particles can move over cell borders.
    auto startIndex3D =
        this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin()));
    auto stopIndex3D =
        this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin()));

    size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) *
                                (stopIndex3D[2] - startIndex3D[2] + 1);
    std::vector<size_t> cellsOfInterest(numCellsOfInterest);

    int i = 0;
    for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) {
      for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) {
        for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) {
          cellsOfInterest[i++] =
              utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo());
        }
      }
    }

    return ParticleIteratorWrapper<ParticleType, false>(
        new internal::RegionParticleIterator<ParticleType, ParticleCell, false>(
            &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior, nullptr));
  }

  /**
   * Get the cell block, not supposed to be used except by verlet lists
   * @return the cell block
   */
  internal::CellBlock3D<ParticleCell> &getCellBlock() { return _cellBlock; }

  /**
   * @copydoc getCellBlock()
   * @note const version
   */
  const internal::CellBlock3D<ParticleCell> &getCellBlock() const { return _cellBlock; }

  /**
   * Returns reference to the data of LinkedCells
   * @return the data
   */
  std::vector<ParticleCell> &getCells() { return this->_cells; }

 protected:
  /**
   * object to manage the block of cells.
   */
  internal::CellBlock3D<ParticleCell> _cellBlock;

  /**
   * load estimation algorithm for balanced traversals.
   */
  autopas::LoadEstimatorOption _loadEstimator;
  // ThreeDimensionalCellHandler
};
} // namespace autopas |
bfs_replicated_csc.c | /* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#define _GNU_SOURCE
#include "common.h"
#include "oned_csc.h"
#include "onesided.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
char IMPLEMENTATION[] = "MPI BFS_REPLICATED_CSC";
static oned_csc_graph g;
static unsigned long* g_in_queue;
static unsigned long* g_in_queue_summary;
static unsigned long* g_out_queue;
static unsigned long* g_out_queue_summary;
static unsigned long* g_visited;
/* Allocate the replicated BFS bitmaps: the global in-queue and its summary,
 * the local out-queue and its summary, and the local visited bitmap.
 * Sizes are rounded up so each queue is a whole number of summary words
 * (one summary bit covers ULONG_BITS queue bits). */
static void allocate_memory(void) {
  int64_t maxlocalverts = g.max_nlocalverts;
  /* summary words needed to cover maxlocalverts bits, rounded up */
  int64_t local_queue_summary_size = (maxlocalverts + ULONG_BITS * ULONG_BITS - 1) / ULONG_BITS / ULONG_BITS;
  int64_t local_queue_size = local_queue_summary_size * ULONG_BITS;
  /* NOTE(review): MUL_SIZE presumably scales a per-rank size by the number of
   * ranks (queues are replicated) — defined in common.h, confirm there. */
  int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int64_t global_queue_size = MUL_SIZE(local_queue_size);
  g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long));
  g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long));
  g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
  g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long));
  g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long));
}
/* Release every BFS queue/bitmap buffer and null out the global pointers so a
 * stale pointer can never be reused after a free. */
static void deallocate_memory(void) {
  unsigned long** buffers[] = {&g_in_queue, &g_in_queue_summary, &g_out_queue, &g_out_queue_summary, &g_visited};
  size_t idx;
  for (idx = 0; idx < sizeof(buffers) / sizeof(buffers[0]); ++idx) {
    free(*buffers[idx]);
    *buffers[idx] = NULL;
  }
}
/* Build the 1-D CSC graph representation from the edge tuples.  The
 * allocate/deallocate pair is a dry run: it verifies up front that enough
 * memory exists for the BFS queues, then releases it until run_bfs(). */
void make_graph_data_structure(const tuple_graph* const tg) {
  convert_graph_to_oned_csc(tg, &g);
  allocate_memory(); /* Make sure all of the space is available */
  deallocate_memory();
}
/* Release the CSC graph.  Queue memory is per-BFS (freed at the end of
 * run_bfs), hence the commented-out deallocate_memory(). */
void free_graph_data_structure(void) {
  free_oned_csc_graph(&g);
  /* deallocate_memory(); */
}
/* This implementation only fills pred[]; it never produces a depth map. */
int bfs_writes_depth_map(void) {
  return 0;
}
/* This version is the traditional level-synchronized BFS using two queues. A
* bitmap is used to indicate which vertices have been visited. Messages are
* sent and processed asynchronously throughout the code to hopefully overlap
* communication with computation. */
/* Level-synchronized BFS over the replicated-queue CSC graph.
 *
 * root  global id of the start vertex.
 * pred  output: pred[v] = predecessor (global id) of local vertex v, -1 if
 *       unreached; pred[root] = root on the owning rank.
 *
 * Each level: scan the (replicated) in-queue guided by its summary bitmap,
 * claim unvisited local targets (TAS_VISITED_LOCAL), then allgather the local
 * out-queues into the next in-queue.  Termination is detected by a bitwise-OR
 * allreduce of the per-rank "made progress" flag.
 *
 * Bug fix: the MPI_Allreduce argument had been mangled to the character
 * sequence "¬_done" (an HTML-entity corruption of "&not_done"), which does
 * not compile; restored the address-of expression. */
void run_bfs(int64_t root, int64_t* pred) {
  allocate_memory();
  const ptrdiff_t nlocalverts = g.nlocalverts;
  /* const int64_t nglobalverts = g.nglobalverts; */
  const size_t* const restrict rowstarts = g.rowstarts;
  const int64_t* const restrict column = g.column;

  /* Set up the visited bitmap. */
  int lg_local_queue_size = g.lg_local_queue_size;
  int64_t local_queue_size = INT64_C(1) << lg_local_queue_size;
  int64_t local_queue_summary_size = local_queue_size / ULONG_BITS;
  int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size);
  int64_t global_queue_size = MUL_SIZE(local_queue_size);

#if 0
  int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t));
  {
    size_t i;
    for (i = 0; i < nlocaledges; ++i) {
      int64_t c = column[i];
      column_swizzled[i] = SWIZZLE_VERTEX(c);
    }
  }
#endif

  unsigned long* restrict in_queue = g_in_queue;
  memset(in_queue, 0, global_queue_size * sizeof(unsigned long));
  unsigned long* restrict in_queue_summary = g_in_queue_summary;
  memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long));
  unsigned long* restrict out_queue = g_out_queue;
  unsigned long* restrict out_queue_summary = g_out_queue_summary;
  unsigned long* restrict visited = g_visited;
  memset(visited, 0, local_queue_size * sizeof(unsigned long));

  /* SET_IN: mark global vertex v in the in-queue and its summary bitmap. */
#define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ULONG_BITS; int bit_idx = vs % ULONG_BITS; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ULONG_BITS] |= (1UL << (word_idx % ULONG_BITS)); in_queue[word_idx] |= mask;} while (0)
  /* TEST_IN: is swizzled vertex vs in the in-queue?  (summary checked first) */
#define TEST_IN(vs) (((in_queue_summary[vs / ULONG_BITS / ULONG_BITS] & (1UL << ((vs / ULONG_BITS) % ULONG_BITS))) != 0) && ((in_queue[vs / ULONG_BITS] & (1UL << (vs % ULONG_BITS))) != 0))
#define TEST_VISITED_LOCAL(v) ((visited[(v) / ULONG_BITS] & (1UL << ((v) % ULONG_BITS))) != 0)
  /* TAS_VISITED_LOCAL: atomically test-and-set visited for local vertex v;
   * returns the old bit, adding v to the out-queue when newly set. */
#define TAS_VISITED_LOCAL(v) (((__sync_fetch_and_or(&visited[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))) & (1UL << ((v) % ULONG_BITS))) != 0) ? 1 : (__sync_fetch_and_or(&out_queue[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))), 0))
// #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0)
#define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0)

  SET_IN(root);
  {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;}
  if (VERTEX_OWNER(root) == rank) {
    pred[VERTEX_LOCAL(root)] = root;
    SET_VISITED_LOCAL(VERTEX_LOCAL(root));
  }
  uint16_t cur_level = 0;
  while (1) {
    ++cur_level;
#if 0
    if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level);
#endif
    memset(out_queue, 0, local_queue_size * sizeof(unsigned long));
    // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long));
    ptrdiff_t i, ii_summary;
#if 0
#pragma omp parallel for schedule(static)
    for (i = 0; i < global_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {
        if (in_queue[i * ULONG_BITS + j]) val |= mask;
      }
      in_queue_summary[i] = val;
    }
#endif
    unsigned long not_done = 0;
    /* Scan the in-queue via its summary bitmap; each set source bit drives a
     * pass over the CSC rows for that queue word. */
#pragma omp parallel for schedule(static) reduction(|:not_done)
    for (ii_summary = 0; ii_summary < global_queue_summary_size; ++ii_summary) {
      uint64_t val_summary = in_queue_summary[ii_summary];
      if (val_summary == 0) continue;
      int ii_offset;
      ptrdiff_t ii;
      for (ii_offset = 0; ii_offset < ULONG_BITS; ++ii_offset) {
        if ((val_summary & (UINT64_C(1) << ii_offset)) == 0) continue;
        ii = ii_summary * ULONG_BITS + ii_offset;
        uint64_t val = in_queue[ii];
        if (val == 0) continue;
        size_t i, i_end = rowstarts[ii + 1];
        for (i = rowstarts[ii]; i < i_end; ++i) {
          /* column[] packs target local vertex (c / ULONG_BITS) with the
           * source's bit offset inside queue word ii (c % ULONG_BITS). */
          int64_t c = column[i];
          int64_t v0_local = c / ULONG_BITS;
          if ((val & (UINT64_C(1) << (c % ULONG_BITS))) != 0 /* TEST_IN(v1_swizzled) */ && !TAS_VISITED_LOCAL(v0_local)) {
            assert (pred[v0_local] == -1);
            int64_t v1_swizzled = (int64_t)ii * ULONG_BITS + c % ULONG_BITS;
            pred[v0_local] = UNSWIZZLE_VERTEX(v1_swizzled);
            not_done |= 1;
          }
        }
      }
    }
#if 1
    /* Rebuild the out-queue summary and fold the out-queue into visited. */
#pragma omp parallel for schedule(static)
    for (i = 0; i < local_queue_summary_size; ++i) {
      unsigned long val = 0UL;
      int j;
      unsigned long mask = 1UL;
      for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) {
        unsigned long full_val = out_queue[i * ULONG_BITS + j];
        visited[i * ULONG_BITS + j] |= full_val;
        if (full_val) val |= mask;
      }
      out_queue_summary[i] = val;
      // not_done |= val;
    }
#endif
    /* Global termination test: did any rank discover a new vertex? */
    MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD);
    if (not_done == 0) break;
    /* Replicate every rank's out-queue into everyone's next in-queue. */
    MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
    MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
  }
  deallocate_memory();
}
/* For each of the count global vertex ids in vertex_p, write the owning rank
 * into owner_p and the owner-local index into local_p.  Rows are independent,
 * so the loop is parallelized with OpenMP. */
void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) {
  const int64_t* restrict verts = vertex_p;
  int* restrict owners = owner_p;
  size_t* restrict locals = local_p;
  ptrdiff_t idx;
#pragma omp parallel for
  for (idx = 0; idx < (ptrdiff_t)count; ++idx) {
    const int64_t v = verts[idx];
    owners[idx] = VERTEX_OWNER(v);
    locals[idx] = VERTEX_LOCAL(v);
  }
}
/* Map a (rank, local index) pair back to the global vertex id used in pred[]. */
int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}
/* Number of vertices owned by this rank, i.e. the length of its pred[] slice. */
size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
|
sparselu.ref.c | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
/* Return a nanosecond timestamp: the Mach calendar clock on macOS, otherwise
 * the POSIX monotonic clock.  Used only for measuring elapsed intervals. */
static unsigned long long current_time_ns() {
#ifdef __MACH__
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    return 1000000000ULL * (unsigned long long)mts.tv_sec + (unsigned long long)mts.tv_nsec;
#else
    struct timespec ts = {0, 0};
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return 1000000000ULL * (unsigned long long)ts.tv_sec + (unsigned long long)ts.tv_nsec;
#endif
}
/**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"
/***********************************************************************
* checkmat:
**********************************************************************/
/***********************************************************************
 * checkmat: element-wise comparison of two bots_arg_size_1^2 blocks.
 * Returns FALSE (with a message) on the first element whose relative
 * error exceeds EPSILON, or whose reference value is zero while the
 * difference is not; returns TRUE when every element matches.
 **********************************************************************/
int checkmat (float *M, float *N)
{
   int row, col;
   for (row = 0; row < bots_arg_size_1; row++)
   {
      for (col = 0; col < bots_arg_size_1; col++)
      {
         const int idx = row*bots_arg_size_1 + col;
         float diff = M[idx] - N[idx];
         if (diff == 0.0) continue;
         if (diff < 0.0) diff = -diff;
         /* reference element is zero: relative error is undefined */
         if (M[idx] == 0)
         {
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n",
                         row, col, M[idx], row, col, N[idx]);
            return FALSE;
         }
         diff = diff / M[idx];
         if (diff > EPSILON)
         {
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
                         row, col, M[idx], row, col, N[idx], diff);
            return FALSE;
         }
      }
   }
   return TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
/***********************************************************************
 * genmat: build the sparse blocked test matrix.  Each block is either
 * NULL (structural zero) or a bots_arg_size_1^2 array filled from a
 * multiplicative-congruential sequence.  The sequence advances only for
 * allocated blocks, so the generated values are reproducible.
 **********************************************************************/
void genmat (float *M[])
{
   int seed = 1325;
   int bi, bj;
   for (bi = 0; bi < bots_arg_size; bi++)
   {
      for (bj = 0; bj < bots_arg_size; bj++)
      {
         /* decide whether block (bi,bj) is a structural zero */
         int empty = FALSE;
         if ((bi < bj) && (bi % 3 != 0)) empty = TRUE;
         if ((bi > bj) && (bj % 3 != 0)) empty = TRUE;
         if (bi % 2 == 1) empty = TRUE;
         if (bj % 2 == 1) empty = TRUE;
         /* the tridiagonal band is always present */
         if (bi == bj) empty = FALSE;
         if (bi == bj - 1) empty = FALSE;
         if (bi - 1 == bj) empty = FALSE;
         if (empty)
         {
            M[bi*bots_arg_size+bj] = NULL;
            continue;
         }
         M[bi*bots_arg_size+bj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
         if (M[bi*bots_arg_size+bj] == NULL)
         {
            bots_message("Error: Out of memory\n");
            exit(101);
         }
         /* fill the block in row-major order from the pseudo-random stream */
         float *dst = M[bi*bots_arg_size+bj];
         int e;
         for (e = 0; e < bots_arg_size_1*bots_arg_size_1; e++)
         {
            seed = (3125 * seed) % 65536;
            dst[e] = (float)((seed - 32768.0) / 16384.0);
         }
      }
   }
}
/***********************************************************************
* print_structure:
**********************************************************************/
/***********************************************************************
 * print_structure: dump the sparsity pattern of the blocked matrix —
 * 'x' for an allocated block, ' ' for a structural zero.
 **********************************************************************/
void print_structure(char *name, float *M[])
{
   int r, c;
   bots_message("Structure for matrix %s @ 0x%p\n", name, M);
   for (r = 0; r < bots_arg_size; r++)
   {
      for (c = 0; c < bots_arg_size; c++)
      {
         if (M[r*bots_arg_size+c] != NULL)
            bots_message("x");
         else
            bots_message(" ");
      }
      bots_message("\n");
   }
   bots_message("\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
/***********************************************************************
 * allocate_clean_block: allocate one bots_arg_size_1^2 block and
 * zero-initialize it.  Aborts the program on allocation failure.
 **********************************************************************/
float * allocate_clean_block()
{
   float *block = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
   if (block == NULL)
   {
      bots_message("Error: Out of memory\n");
      exit (101);
   }
   int e;
   for (e = 0; e < bots_arg_size_1*bots_arg_size_1; e++)
      block[e] = 0.0;
   return block;
}
/***********************************************************************
* lu0:
**********************************************************************/
/***********************************************************************
 * lu0: in-place LU factorization (no pivoting) of one diagonal block.
 * After the call, diag holds L (strict lower, unit diagonal implicit)
 * and U (upper) of the block.
 **********************************************************************/
void lu0(float *diag)
{
   int row, col, piv;
   for (piv = 0; piv < bots_arg_size_1; piv++)
   {
      for (row = piv + 1; row < bots_arg_size_1; row++)
      {
         /* store the multiplier in the L part, then eliminate the row */
         float factor = diag[row*bots_arg_size_1+piv] / diag[piv*bots_arg_size_1+piv];
         diag[row*bots_arg_size_1+piv] = factor;
         for (col = piv + 1; col < bots_arg_size_1; col++)
            diag[row*bots_arg_size_1+col] -= factor * diag[piv*bots_arg_size_1+col];
      }
   }
}
/***********************************************************************
* bdiv:
**********************************************************************/
/***********************************************************************
 * bdiv: block divide — solve row * U^-1 in place, where U is the upper
 * triangle of the factored diagonal block diag.
 **********************************************************************/
void bdiv(float *diag, float *row)
{
   int r, c, k;
   for (r = 0; r < bots_arg_size_1; r++)
   {
      for (k = 0; k < bots_arg_size_1; k++)
      {
         /* scale by the pivot, then propagate to the columns right of k */
         float scaled = row[r*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         row[r*bots_arg_size_1+k] = scaled;
         for (c = k + 1; c < bots_arg_size_1; c++)
            row[r*bots_arg_size_1+c] -= scaled * diag[k*bots_arg_size_1+c];
      }
   }
}
/***********************************************************************
* bmod:
**********************************************************************/
/***********************************************************************
 * bmod: block modification — inner -= row * col (dense block GEMM
 * update of the trailing submatrix).
 **********************************************************************/
void bmod(float *row, float *col, float *inner)
{
   int i, j, k;
   for (i = 0; i < bots_arg_size_1; i++)
   {
      for (j = 0; j < bots_arg_size_1; j++)
      {
         /* accumulate the dot product subtraction for element (i,j) */
         float acc = inner[i*bots_arg_size_1+j];
         for (k = 0; k < bots_arg_size_1; k++)
            acc -= row[i*bots_arg_size_1+k] * col[k*bots_arg_size_1+j];
         inner[i*bots_arg_size_1+j] = acc;
      }
   }
}
/***********************************************************************
* fwd:
**********************************************************************/
/***********************************************************************
 * fwd: forward substitution — solve L^-1 * col in place, where L is
 * the unit lower triangle of the factored diagonal block diag.
 **********************************************************************/
void fwd(float *diag, float *col)
{
   int i, j, k;
   for (j = 0; j < bots_arg_size_1; j++)
   {
      for (k = 0; k < bots_arg_size_1; k++)
      {
         /* pivot element of column j; rows below k subtract its L-scaled value */
         float pivot = col[k*bots_arg_size_1+j];
         for (i = k + 1; i < bots_arg_size_1; i++)
            col[i*bots_arg_size_1+j] -= diag[i*bots_arg_size_1+k] * pivot;
      }
   }
}
/* Allocate the bots_arg_size x bots_arg_size array of block pointers, fill it
 * with the sparse test matrix, and dump its structure labelled with `pass`. */
void sparselu_init (float ***pBENCH, char *pass)
{
   *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
   genmat(*pBENCH);
   print_structure(pass, *pBENCH);
}
/* Parallel SparseLU factorization of the blocked matrix BENCH using OpenMP
 * tasks (BOTS benchmark kernel).  Per pivot step kk: factor the diagonal
 * block, spawn independent fwd/bdiv tasks for the pivot row/column, wait,
 * then spawn the trailing-submatrix bmod updates and wait again.  The outer
 * untied task runs the kk loop from the single construct while the other
 * threads in the parallel region execute the child tasks. */
void sparselu_par_call(float **BENCH)
{
   int ii, jj, kk;
   bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
           bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
   const unsigned long long full_program_start = current_time_ns();
{
#pragma omp parallel
{
#pragma omp single nowait
{
#pragma omp task untied
   for (kk=0; kk<bots_arg_size; kk++)
   {
      /* factor the pivot block in place */
      lu0(BENCH[kk*bots_arg_size+kk]);
      /* pivot row: forward substitution, one task per non-null block */
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
         {
#pragma omp task untied firstprivate(kk, jj) shared(BENCH)
            {
               fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
            }
         }
      /* pivot column: block divide, one task per non-null block */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
         {
#pragma omp task untied firstprivate(kk, ii) shared(BENCH)
            {
               bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
            }
         }
      /* fwd/bdiv results are inputs to bmod: wait before the trailing update */
#pragma omp taskwait
      ;
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
#pragma omp task untied firstprivate(kk, jj, ii) shared(BENCH)
                  {
                     /* fill-in: materialize a zero block before updating it */
                     if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                     bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                  }
               }
      /* trailing update must finish before the next pivot step */
#pragma omp taskwait
      ;
   }
}
}
} ;
   const unsigned long long full_program_end = current_time_ns();
   printf("full_program %llu ns\n", full_program_end - full_program_start);
   bots_message(" completed!\n");
}
/* Sequential SparseLU factorization of the blocked matrix BENCH.  Same
 * algorithm and block-update order as the parallel version, without tasks:
 * per pivot step, factor the diagonal block, update the pivot row (fwd) and
 * column (bdiv), then apply the trailing-submatrix update (bmod). */
void sparselu_seq_call(float **BENCH)
{
   int i, j, k;
   for (k = 0; k < bots_arg_size; k++)
   {
      /* factor the pivot block in place */
      lu0(BENCH[k*bots_arg_size+k]);
      /* pivot row: forward substitution on every non-null block */
      for (j = k + 1; j < bots_arg_size; j++)
      {
         if (BENCH[k*bots_arg_size+j] == NULL) continue;
         fwd(BENCH[k*bots_arg_size+k], BENCH[k*bots_arg_size+j]);
      }
      /* pivot column: block divide on every non-null block */
      for (i = k + 1; i < bots_arg_size; i++)
      {
         if (BENCH[i*bots_arg_size+k] == NULL) continue;
         bdiv (BENCH[k*bots_arg_size+k], BENCH[i*bots_arg_size+k]);
      }
      /* trailing submatrix update, materializing fill-in blocks on demand */
      for (i = k + 1; i < bots_arg_size; i++)
      {
         if (BENCH[i*bots_arg_size+k] == NULL) continue;
         for (j = k + 1; j < bots_arg_size; j++)
         {
            if (BENCH[k*bots_arg_size+j] == NULL) continue;
            if (BENCH[i*bots_arg_size+j] == NULL)
               BENCH[i*bots_arg_size+j] = allocate_clean_block();
            bmod(BENCH[i*bots_arg_size+k], BENCH[k*bots_arg_size+j], BENCH[i*bots_arg_size+j]);
         }
      }
   }
}
/* Dump the final sparsity structure of BENCH, labelled with `pass`. */
void sparselu_fini (float **BENCH, char *pass)
{
   print_structure(pass, BENCH);
}
/* Compare the sequential and parallel results block by block: both matrices
 * must have the same sparsity structure and matching block contents
 * (checkmat).  Stops at the first mismatch. */
int sparselu_check(float **SEQ, float **BENCH)
{
   int r, c;
   int ok = 1;
   for (r = 0; r < bots_arg_size && ok; r++)
   {
      for (c = 0; c < bots_arg_size && ok; c++)
      {
         float *a = SEQ[r*bots_arg_size+c];
         float *b = BENCH[r*bots_arg_size+c];
         if ((a == NULL) != (b == NULL))
            ok = FALSE;          /* structure mismatch */
         else if (a != NULL)
            ok = checkmat(a, b); /* both present: compare contents */
      }
   }
   return ok ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
MatrixOp.c | #include "../include/MatrixOp.h"
//==================== vector operations ===========================
// put random values into the matrix if mode == 0
// Allocate a Vector of the given length.  mode == 0 fills it with uniform
// random floats in [0,1]; any other mode zero-fills it.
struct Vector * new_vec(int length, int mode) {
    struct Vector *v = (struct Vector *)malloc(sizeof(struct Vector));
    int i;
    v->length = length;
    v->vec = (float*)malloc(length * sizeof(float));
    for(i = 0; i < length; i++) {
        v->vec[i] = mode ? 0 : (((float)rand()) / RAND_MAX);
    }
    return v;
}
// Release a Vector created by new_vec: its data array first, then the struct.
void free_vec(struct Vector *vecp) {
    free(vecp->vec);
    free(vecp);
}
// Print the elements of a vector on one line; NULL is silently ignored.
void print_vec(struct Vector *vecp) {
    if(vecp == NULL) return ;
    int idx;
    for(idx = 0; idx < vecp->length; idx++) {
        printf("%lf ", vecp->vec[idx]);
    }
    printf("\n");
}
//==================== matrix operations ===========================
// Allocate an m x n Matrix as an array of m row Vectors.
// mode is forwarded to new_vec (0 = random fill, otherwise zero fill).
struct Matrix * new_mat(int m, int n, int mode) {
    struct Matrix *mp = (struct Matrix*)malloc(sizeof(struct Matrix));
    int row;
    mp->m = m;
    mp->n = n;
    mp->mat = (struct Vector**)malloc(m * sizeof(struct Vector*));
    for(row = 0; row < m; row++)
        mp->mat[row] = new_vec(n, mode);
    return mp;
}
// Release a Matrix allocated by new_mat: every row vector, then the
// row-pointer array itself, then the descriptor.
// fix: the original never freed matp->mat, leaking the row-pointer array.
void free_mat(struct Matrix *matp) {
    for (int i = 0; i < matp->m; i++)
        free_vec(matp->mat[i]);
    free(matp->mat); /* fix: this array was previously leaked */
    free(matp);
}
// Print a matrix row by row (via print_vec), followed by a blank line.
// A NULL matrix prints nothing.
void print_mat(struct Matrix *matp) {
    if (matp == NULL) return;
    const int rows = matp->m;
    for (int r = 0; r < rows; ++r) {
        print_vec(matp->mat[r]);
    }
    printf("\n");
}
// take two corners and return the corresponding sub-matrix
// Corners are inclusive: rows [upperLefty, lowerRighty], columns
// [upperLeftx, lowerRightx].  Returns NULL when the corners are inverted
// or out of bounds.  The element copy is parallelized row-wise.
struct Matrix * submat(struct Matrix *Mat, int upperLefty, int upperLeftx, int lowerRighty, int lowerRightx) {
struct Matrix *resMat = NULL;
if(upperLefty > lowerRighty || upperLeftx > lowerRightx) return resMat;
if(upperLefty < 0 || upperLeftx < 0 || lowerRighty >= Mat->m || lowerRightx >= Mat->n) return resMat;
resMat = new_mat(lowerRighty - upperLefty + 1, lowerRightx - upperLeftx + 1, 1);
int i, threadCount = omp_get_num_procs();
#pragma omp parallel for num_threads(threadCount) default(none) shared(upperLefty, upperLeftx, lowerRighty, lowerRightx, resMat, Mat) private(i)
for(i = upperLefty;i <= lowerRighty;i++)
for(int j = upperLeftx;j <= lowerRightx;j++) {
resMat->mat[i-upperLefty]->vec[j-upperLeftx] = Mat->mat[i]->vec[j];
}
return resMat;
}
// matrix addition/subtraction, optimized by OpenMP and AVX2
// add for mode == 0 and sub for mode == 1
// Element sign applied to matb in the scalar tail loop: +1 for add, -1 for sub.
int sign[2]={1,-1};
// Returns a newly allocated matrix, or NULL on dimension mismatch.
struct Matrix * matrix_add_sub(struct Matrix *mata, struct Matrix *matb, int mode) {
struct Matrix *resMat = NULL;
if(mata->m != matb->m || mata->n != matb->n)
return resMat;
resMat = new_mat(mata->m, mata->n, 1);
int i, threadCount = omp_get_num_procs();
#pragma omp parallel for num_threads(threadCount) shared(resMat, mata, matb, sign, mode) private(i)
for(i = 0;i < mata->m;i++) {
// Vectorized main loop: 8 floats per AVX2 lane.
for(int k = 0;k < mata->n / 8;k++) {
__m256 intVec1 = _mm256_loadu_ps(&mata->mat[i]->vec[8*k]);
__m256 intVec2 = _mm256_loadu_ps(&matb->mat[i]->vec[8*k]);
__m256 intVec3 = mode ? _mm256_sub_ps(intVec1, intVec2) :
_mm256_add_ps(intVec1, intVec2);
_mm256_storeu_ps(&resMat->mat[i]->vec[8*k], intVec3);
}
// Scalar tail for the remaining n % 8 columns.
for(int k = (mata->n / 8)*8;k < mata->n;k++)
resMat->mat[i]->vec[k] = mata->mat[i]->vec[k] + sign[mode] * matb->mat[i]->vec[k];
}
return resMat;
}
// Compare two matrices element-wise with absolute tolerance 1e-3.
// Returns 1 when the matrices are equal within tolerance, 0 on dimension
// mismatch or on the first element that differs by more than 1e-3.
int mat_is_equal(struct Matrix *mata, struct Matrix *matb) {
    if (mata->m != matb->m || mata->n != matb->n)
        return 0;
    for (int i = 0; i < mata->m; i++)
        for (int j = 0; j < mata->n; j++) {
            /* fix: the original used integer abs(), which converts the float
             * difference to int — any difference below 1.0 truncated to 0
             * and was never reported.  Compute the float magnitude instead. */
            float diff = mata->mat[i]->vec[j] - matb->mat[i]->vec[j];
            if (diff < 0) diff = -diff;
            if (diff > 1e-3) {
#ifdef DEBUG
                printf("%lf, %lf\n", mata->mat[i]->vec[j], matb->mat[i]->vec[j]);
#endif
                return 0;
            }
        }
    return 1;
}
// combine four sub-matrices into one matrix;
// Requires column counts to match within each column of blocks and row
// counts to match within each row of blocks; returns NULL otherwise.
// Top and bottom halves are filled by two row-parallel loops.
struct Matrix *combine_matrix(struct Matrix *upperLeftMat, struct Matrix *upperRightMat,
struct Matrix *lowerLeftMat, struct Matrix *lowerRightMat) {
struct Matrix *resMat = NULL;
if(upperLeftMat->n != lowerLeftMat->n || upperRightMat->n != lowerRightMat->n) return resMat;
if(upperLeftMat->m != upperRightMat->m || lowerLeftMat->m != lowerRightMat->m) return resMat;
resMat = new_mat(upperLeftMat->m + lowerLeftMat->m, upperLeftMat->n + upperRightMat->n, 1);
int i, threadCount = omp_get_num_procs();
#pragma omp parallel for num_threads(threadCount) default(none) shared(upperLeftMat, upperRightMat, resMat) private(i)
for(i = 0;i < upperLeftMat->m;i++) {
for(int j = 0;j < upperLeftMat->n;j++)
resMat->mat[i]->vec[j] = upperLeftMat->mat[i]->vec[j];
for(int j = 0;j < upperRightMat->n;j++)
resMat->mat[i]->vec[j+upperLeftMat->n] = upperRightMat->mat[i]->vec[j];
}
#pragma omp parallel for num_threads(threadCount) default(none) shared(upperLeftMat, lowerLeftMat, lowerRightMat, resMat) private(i)
for(i = 0;i < lowerLeftMat->m;i++) {
for(int j = 0;j < lowerLeftMat->n;j++)
resMat->mat[i+upperLeftMat->m]->vec[j] = lowerLeftMat->mat[i]->vec[j];
for(int j = 0;j < lowerRightMat->n;j++)
resMat->mat[i+upperLeftMat->m]->vec[j+upperLeftMat->n] = lowerRightMat->mat[i]->vec[j];
}
return resMat;
}
// Copy the dy x dx upper-left sub-block of mata into matb at offset (uly, ulx).
// Returns 1 on success, 0 when the requested region does not fit.
// fix: the original was declared int but fell off the end without returning a
// value on the success path, which is undefined behavior for callers reading
// the result.
int copy_matrix(struct Matrix *mata, struct Matrix *matb, int uly, int ulx, int dy, int dx) {
    if (uly < 0 || ulx < 0 || dy < 0 || dx < 0 || dy > mata->m || dx > mata->n ||
        uly + dy > matb->m || ulx + dx > matb->n)
        return 0;
    int y, x, threadCount = omp_get_num_procs(), i;
#pragma omp parallel for num_threads(threadCount) default(none) shared(uly, ulx, dy, dx, mata, matb) private(i, x, y)
    for (i = 0; i < dy; i++)
        for (int j = 0; j < dx; j++) {
            y = uly + i, x = ulx + j;
            matb->mat[y]->vec[x] = mata->mat[i]->vec[j];
        }
    return 1; /* fix: success path previously returned no value */
}
// Compute one element of resMat = mata * matb: the dot product of row i of
// mata with column j of matb (scalar kernel for the tiled multiply).
void mul_1_by_1(struct Matrix *resMat, struct Matrix *mata, struct Matrix *matb, int i, int j) {
    register float acc = 0;
    const int len = mata->n;
    for (int t = 0; t < len; ++t)
        acc += mata->mat[i]->vec[t] * matb->mat[t]->vec[j];
    resMat->mat[i]->vec[j] = acc;
}
// Compute a 1x8 tile of resMat = mata * matb: row i of mata against columns
// j..j+7 of matb.  Eight running sums are kept in registers so each element
// of row i is loaded once per k iteration.  Requires j+7 < matb->n.
void mul_1_by_8(struct Matrix *resMat, struct Matrix *mata, struct Matrix *matb, int i, int j) {
register float resMat0, resMat1, resMat2, resMat3, resMat4, resMat5, resMat6, resMat7, mata0;
resMat0 = resMat1 = resMat2 = resMat3 = resMat4 = resMat5 = resMat6 = resMat7 = 0;
for(int k = 0;k < mata->n;k++) {
mata0 = mata->mat[i]->vec[k];
resMat0 += mata0 * matb->mat[k]->vec[j];
resMat1 += mata0 * matb->mat[k]->vec[j+1];
resMat2 += mata0 * matb->mat[k]->vec[j+2];
resMat3 += mata0 * matb->mat[k]->vec[j+3];
resMat4 += mata0 * matb->mat[k]->vec[j+4];
resMat5 += mata0 * matb->mat[k]->vec[j+5];
resMat6 += mata0 * matb->mat[k]->vec[j+6];
resMat7 += mata0 * matb->mat[k]->vec[j+7];
}
// Write the eight accumulated results back.
resMat->mat[i]->vec[j] = resMat0;
resMat->mat[i]->vec[j+1] = resMat1;
resMat->mat[i]->vec[j+2] = resMat2;
resMat->mat[i]->vec[j+3] = resMat3;
resMat->mat[i]->vec[j+4] = resMat4;
resMat->mat[i]->vec[j+5] = resMat5;
resMat->mat[i]->vec[j+6] = resMat6;
resMat->mat[i]->vec[j+7] = resMat7;
}
// Compute an 8x1 tile of resMat = mata * matb (rows i..i+7, column j)
// by delegating each element to the scalar kernel mul_1_by_1.
void mul_8_by_1(struct Matrix *resMat, struct Matrix *mata, struct Matrix *matb, int i, int j) {
    for (int r = 0; r < 8; ++r)
        mul_1_by_1(resMat, mata, matb, i + r, j);
}
// Compute an 8x8 tile of resMat = mata * matb with AVX2: for each k, one
// 8-wide load of matb row k (columns j..j+7) is combined with broadcasts of
// mata[i..i+7][k] into eight vector accumulators.  The `k ?` ternaries
// initialize each accumulator on the first iteration, so the function
// assumes mata->n >= 1 (otherwise uninitialized vectors would be stored —
// TODO confirm callers guarantee this).  Requires i+7 < mata->m, j+7 < matb->n.
void mul_8_by_8(struct Matrix *resMat, struct Matrix *mata, struct Matrix *matb, int i, int j) {
__m256 vecb, veca0, veca1, veca2, veca3, veca4, veca5, veca6, veca7,
resMat0, resMat1, resMat2, resMat3, resMat4, resMat5, resMat6, resMat7;
for(int k = 0;k < mata->n;k++) {
vecb = _mm256_loadu_ps(&matb->mat[k]->vec[j]);
veca0 = _mm256_broadcast_ss(&mata->mat[i]->vec[k]);
veca1 = _mm256_broadcast_ss(&mata->mat[i+1]->vec[k]);
veca2 = _mm256_broadcast_ss(&mata->mat[i+2]->vec[k]);
veca3 = _mm256_broadcast_ss(&mata->mat[i+3]->vec[k]);
veca4 = _mm256_broadcast_ss(&mata->mat[i+4]->vec[k]);
veca5 = _mm256_broadcast_ss(&mata->mat[i+5]->vec[k]);
veca6 = _mm256_broadcast_ss(&mata->mat[i+6]->vec[k]);
veca7 = _mm256_broadcast_ss(&mata->mat[i+7]->vec[k]);
resMat0 = k ? _mm256_add_ps(resMat0, _mm256_mul_ps(veca0, vecb)) :
_mm256_mul_ps(veca0, vecb);
resMat1 = k ? _mm256_add_ps(resMat1, _mm256_mul_ps(veca1, vecb)) :
_mm256_mul_ps(veca1, vecb);
resMat2 = k ? _mm256_add_ps(resMat2, _mm256_mul_ps(veca2, vecb)) :
_mm256_mul_ps(veca2, vecb);
resMat3 = k ? _mm256_add_ps(resMat3, _mm256_mul_ps(veca3, vecb)) :
_mm256_mul_ps(veca3, vecb);
resMat4 = k ? _mm256_add_ps(resMat4, _mm256_mul_ps(veca4, vecb)) :
_mm256_mul_ps(veca4, vecb);
resMat5 = k ? _mm256_add_ps(resMat5, _mm256_mul_ps(veca5, vecb)) :
_mm256_mul_ps(veca5, vecb);
resMat6 = k ? _mm256_add_ps(resMat6, _mm256_mul_ps(veca6, vecb)) :
_mm256_mul_ps(veca6, vecb);
resMat7 = k ? _mm256_add_ps(resMat7, _mm256_mul_ps(veca7, vecb)) :
_mm256_mul_ps(veca7, vecb);
}
// Store the eight result rows of the tile.
_mm256_storeu_ps(&resMat->mat[i]->vec[j], resMat0);
_mm256_storeu_ps(&resMat->mat[i+1]->vec[j], resMat1);
_mm256_storeu_ps(&resMat->mat[i+2]->vec[j], resMat2);
_mm256_storeu_ps(&resMat->mat[i+3]->vec[j], resMat3);
_mm256_storeu_ps(&resMat->mat[i+4]->vec[j], resMat4);
_mm256_storeu_ps(&resMat->mat[i+5]->vec[j], resMat5);
_mm256_storeu_ps(&resMat->mat[i+6]->vec[j], resMat6);
_mm256_storeu_ps(&resMat->mat[i+7]->vec[j], resMat7);
}
//==================== Other helper operations ===========================
// Return the smaller of two ints.
int Min(int a, int b) { return a < b ? a : b; }
// Return the larger of two ints.
int Max(int a, int b) { return b < a ? a : b; }
main.c |
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>
#include "file.h"
#include "common.h"
#include "kernels.h"
#include "alignsize.h"
#ifdef SPEC
#include "specrand.h"
#endif
// Fill the nx*ny*nz volume A0: zeros when `reset` is nonzero, otherwise
// uniform random values in [0, 1].  SPEC builds use the bundled RNG with a
// fixed seed so results are reproducible across platforms.  Always returns 0.
static int generate_data(float *A0, int nx, int ny, int nz, int reset)
{
#ifdef SPEC
spec_srand(54321);
int i,j,k;
for(k=0;k<nz;k++)
for(j=0;j<ny;j++)
for(i=0;i<nx;i++)
if(reset)
A0[Index3D(nx,ny,i,j,k)] = 0.0f;
else
A0[Index3D(nx,ny,i,j,k)] = (float) spec_genrand_real1();
return 0;
#else
srand(54321);
int i,j,k;
for(k=0;k<nz;k++)
for(j=0;j<ny;j++)
for(i=0;i<nx;i++)
if(reset)
A0[Index3D(nx,ny,i,j,k)] = 0.0f;
else
A0[Index3D(nx,ny,i,j,k)] = rand()/(float)RAND_MAX;
return 0;
#endif
}
// Read nx*ny*nz floats sequentially from fp into A0 (same element order as
// the original triple loop: x fastest, then y, then z).
// Returns 0 on success, -1 on a short read or stream error.
// fix: the original ignored fread's return value, so a truncated input file
// silently left the tail of A0 uninitialized.
static int read_data(float *A0, int nx,int ny,int nz,FILE *fp)
{
    size_t want = (size_t)nx * (size_t)ny * (size_t)nz;
    if (fread(A0, sizeof(float), want, fp) != want)
        return -1;
    return 0;
}
int main(int argc, char** argv) {
struct pb_Parameters *parameters;
printf("CPU-based 7 points stencil codes****\n");
printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and I-Jui Sung<sung10@illinois.edu>\n");
printf("This version maintained by Chris Rodrigues ***********\n");
parameters = pb_ReadParameters(&argc, argv);
//declaration
int nx,ny,nz;
int size;
int iteration;
// 7-point stencil coefficients: c0 weights the centre cell, c1 the six
// face neighbours (earlier coefficient choice kept for reference).
//float c0=1.0f/6.0f;
//float c1=1.0f/6.0f/6.0f;
float c1=1.0f/7.0f;
float c0=-1.0f/7.0f;
// Parse and validate grid dimensions and the iteration count.
if (argc<5)
{
printf("Usage: probe nx ny nz tx ty t\n"
"nx: the grid size x\n"
"ny: the grid size y\n"
"nz: the grid size z\n"
"t: the iteration time\n");
return -1;
}
nx = atoi(argv[1]);
if (nx<1) {
printf("ERROR: nx=%d\n",nx);
return -1;
}
ny = atoi(argv[2]);
if (ny<1) {
printf("ERROR: ny=%d\n",ny);
return -1;
}
nz = atoi(argv[3]);
if (nz<1) {
printf("ERROR: nz=%d\n",nz);
return -1;
}
iteration = atoi(argv[4]);
if(iteration<1) {
printf("ERROR: iteration=%d\n",iteration);
return -1;
}
printf("[%s:%d] nx[%d] ny[%d] nz[%d] iter[%d]\n", __FILE__, __LINE__, nx, ny, nz, iteration);
//host data
float *h_A0;
float *h_Anext;
size=nx*ny*nz;
// Aligned host buffers for the vectorized kernel.
// NOTE(review): memalign results are not NULL-checked — confirm that is
// acceptable for the benchmark's input sizes.
size_t alignment = SPEC_ALIGNMENT_SIZE;
h_A0=(float*)memalign(alignment, sizeof(float)*size);
h_Anext=(float*)memalign(alignment, sizeof(float)*size);
/*
FILE *fp = fopen(parameters->inpFiles[0], "rb");
read_data(h_A0, nx,ny,nz,fp);
fclose(fp);
memcpy (h_Anext,h_A0 ,sizeof(float)*size);
*/
generate_data(h_A0,nx,ny,nz,0);
generate_data(h_Anext,nx,ny,nz,0);
// Keep both buffers resident on the device for all iterations.
// NOTE(review): the host pointers are swapped while mapped by this target
// data region — confirm cpu_stencil's own mapping handles the ping-pong.
#pragma omp target data map(to: h_A0[0:size]) map(tofrom: h_Anext[0:size])
{
int t;
for(t=0;t<iteration;t++){
cpu_stencil(c0, c1, h_A0, h_Anext, nx, ny, nz);
// Ping-pong input/output buffers between iterations.
float *temp=h_A0;
h_A0 = h_Anext;
h_Anext = temp;
}
}
if (parameters->outFile) {
outputData(parameters->outFile,h_Anext,nx,ny,nz);
}
free (h_A0);
free (h_Anext);
pb_FreeParameters(parameters);
return 0;
}
|
Ant-Colony.c | /*
Author: Makarios Christakis
Description:
Parallel implementation of the ant colony optimization algorithm for the
travelling salesman problem. The criterion by which the algorithm converges
is the %change in the average distance travelled by all the ant agents.
For the parameters below, the algorithm converges after 3 iterations
with:
Min Path Length: 4458074.00
Average Path Length: 4501838.00
Timed using time() on a 7th gen i7, Ubuntu 18.04 machine we get:
real 19m12,759s
user 129m38,110s
sys 0m1,071s
The serial implementation ran for about 80 minutes on the same machine
so we have reduced the runtime by a factor of 4.
*/
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// **********************************************************
// DEFINITIONS
#define N_POINTS 10000 //Number of cities to generate
#define N_AGENTS 8 // Number of ant agents
#define P 0.5 // Pheromone evaporation rate
#define PHEROMONE_INIT_VAL (float)1 // Initial pheromone values
// **********************************************************
// STRUCTS
// Per-ant state for one iteration of the colony.
struct AntAgent {
float pathLength; //distance travelled
int city_flags[N_POINTS]; //available = 1, unavailable = 0
int route[N_POINTS]; // visit order; route[0] is the start city
int initialCity; // city the tour starts (and ends) at
int currentCity; // city the ant currently occupies
};
// **********************************************************
// GLOBAL VARS
float cities[N_POINTS][2] = {0};
float minPathLength = 0;
float avgPathLength = 0;
struct AntAgent ants[N_AGENTS];
float pheromones[N_POINTS][N_POINTS];
unsigned int seed = 159852753;
// **********************************************************
// Random unsigned int generator
// Linear congruential generator (glibc constants) advancing the global
// `seed` and returning its new value.
// NOTE(review): a block-scope `threadprivate` directive is not conforming
// OpenMP — it must follow the variable's file-scope declaration — so whether
// `seed` is actually thread-private here is compiler-dependent; confirm and
// consider hoisting the pragma next to the declaration of `seed`.
unsigned int randUint() {
#pragma omp threadprivate(seed)
seed = seed * 1103515245 + 12345;
return seed;
}
// **********************************************************
// Scatter the city coordinates uniformly over a 1000 x 1000 grid using rand().
void initVec() {
    for (int c = 0; c < N_POINTS; ++c) {
        cities[c][0] = (float)rand() / RAND_MAX * 1e3;
        cities[c][1] = (float)rand() / RAND_MAX * 1e3;
    }
}
// Reset every directed edge's pheromone level to the initial value.
// Rows are independent, so the outer loop is parallelized.
void initPheromones() {
#pragma omp parallel for
    for (int row = 0; row < N_POINTS; ++row) {
        for (int col = 0; col < N_POINTS; ++col) {
            pheromones[row][col] = PHEROMONE_INIT_VAL;
        }
    }
}
// Re-initialize every agent for a new iteration: zero its travelled
// distance, mark all cities available, and drop it on a random start city.
void resetAgents() {
for (int i = 0; i < N_AGENTS; i++) {
ants[i].pathLength = 0;
// NOTE(review): memset writes byte value 1 into every byte of each int,
// producing 0x01010101 rather than 1 — harmless here because the flags
// are only tested for truthiness and cleared to 0, but worth confirming.
memset(&ants[i].city_flags[0], 1, N_POINTS * sizeof(int));
int register tmp = (int)rand() % N_POINTS;
ants[i].initialCity = tmp;
ants[i].currentCity = tmp;
ants[i].route[0] = tmp;
// The start city is immediately marked visited.
ants[i].city_flags[tmp] = 0;
}
}
// **********************************************************
// Euclidean distance between cities p1 and p2 on the coordinate grid.
float dist(int p1, int p2) {
    const float ddx = cities[p1][0] - cities[p2][0];
    const float ddy = cities[p1][1] - cities[p2][1];
    return (float)sqrt(ddx * ddx + ddy * ddy);
}
// Make each agent run through all the cities according to the algorithm's rules.
// Each ant performs N_POINTS-1 probabilistic moves: the next city is drawn by
// roulette-wheel selection with weight sqrt(pheromone / distance) over the
// still-unvisited cities, and the tour is finally closed back to the start
// city.  Ants are independent, so the loop is parallelized over agents.
void releaseAgents() {
#pragma omp parallel for
for (int i = 0; i < N_AGENTS; i++) {
float city_probs[N_POINTS] = {0};
for (int j = 0; j < N_POINTS - 1; j++) {
int register curr = ants[i].currentCity;
// Uniform draw in [0, 1), scaled below by the total weight.
float prob = (float)randUint() / __UINT32_MAX__;
float denominator = 0;
//First pass from all available cities.
for (int k = 0; k < N_POINTS; k++) {
if (ants[i].city_flags[k]) {
float len = dist(curr, k);
len = 1.0 / len;
float register tmp = sqrt(pheromones[curr][k] * len);
/*ants[i].*/ city_probs[k] = tmp;
denominator += tmp;
}
}
prob *= denominator;
float cumulativeProb = 0;
// Probabilistic choice of next city to visit.
// NOTE(review): if rounding makes prob reach the accumulated total
// without triggering the comparison, no city is chosen this step and
// route[j + 1] remains unset — confirm this cannot occur in practice.
for (int k = 0; k < N_POINTS; k++) {
if (ants[i].city_flags[k]) {
cumulativeProb += city_probs[k];
if (prob < cumulativeProb) {
// Move to city
ants[i].city_flags[k] = 0;
ants[i].pathLength += dist(curr, k);
ants[i].currentCity = k;
ants[i].route[j + 1] = k;
break;
}
}
}
}
//printf("agent = %d\tcurrCity = %d\tinitCity = %d\n",i,ants[i].currentCity,ants[i].initialCity);
// Close the tour: return from the last city to the starting city.
ants[i].pathLength += dist(ants[i].currentCity, ants[i].initialCity);
}
}
// Evaporate and deposit pheromone on every directed edge (i, j).
// For each edge, sum the tour lengths of all ants whose route traversed
// i -> j, then apply evaporation plus a deposit inversely proportional to
// that sum.  Edges are independent, so the outer loop is parallelized.
void updatePheromones() {
#pragma omp parallel for
    for (int i = 0; i < N_POINTS; i++) {
        for (int j = 0; j < N_POINTS; j++) {
            //For each path between two points calculate the sum
            //of the distances of all ants that passed from it
            float sumDist = 0;
            for (int k = 0; k < N_AGENTS; k++) {
                for (int q = 0; q < N_POINTS; q++) {
                    if (ants[k].route[q] == j) {
                        /* fix: guard q > 0 — the original read route[-1]
                         * (out of bounds) whenever j was the ant's starting
                         * city, i.e. route[0] == j */
                        if (q > 0 && ants[k].route[q - 1] == i) {
                            sumDist += ants[k].pathLength;
                        }
                        break;
                    }
                }
            }
            if (sumDist != 0)
                pheromones[i][j] = (1 - P) * pheromones[i][j] + 1.0 / sumDist;
            else
                pheromones[i][j] = (1 - P) * pheromones[i][j];
        }
    }
}
// Driver: iterate reset/release/update rounds until the average tour length
// changes by less than 1% between consecutive iterations, then report the
// minimum and average tour lengths.
int main() {
// NOTE(review): a block-scope threadprivate directive is not conforming
// OpenMP (it must follow seed's file-scope declaration) — kept as-is;
// confirm intent and hoist it if thread-private seeding is required.
#pragma omp threadprivate(seed)
    float prevAvg = 1e9; // previous iteration's average tour length
    float sum = 0;
    int iter = 1; //iteration number
    initVec();
    initPheromones();
    printf("INITIALIZED EVERYTHING\n");
    do {
        resetAgents();
        releaseAgents();
        updatePheromones();
        // Gather per-iteration statistics over all agents.
        minPathLength = ants[0].pathLength;
        sum = ants[0].pathLength;
        for (int i = 1; i < N_AGENTS; i++) {
            if (ants[i].pathLength < minPathLength) {
                minPathLength = ants[i].pathLength;
            }
            sum += ants[i].pathLength;
        }
        prevAvg = avgPathLength;
        avgPathLength = sum / N_AGENTS;
        iter++;
        // Converge when the average tour length changes by less than 1%.
        // fix: integer abs() truncated the float difference toward zero, so
        // any change smaller than 1.0 read as 0 and could end the loop
        // prematurely; fabsf implements the intended %change criterion.
    } while (fabsf(avgPathLength - prevAvg) / prevAvg > 0.01);
    printf("Iterations: %d\tMin Path Length: %.2f\tAverage Path: %.2f\n", iter, minPathLength, avgPathLength);
    return 0;
}
simpois.c | #include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
// malloc wrapper that never returns NULL: on allocation failure it reports
// the requested size on stderr and terminates the process.
static void *xmalloc(size_t n)
{
    void *ptr = malloc(n);
    if (ptr == NULL)
    {
        fprintf(stderr, "xmalloc: can not malloc %zu bytes\n", n);
        exit(1);
    }
    return ptr;
}
// the type of a "getpixel" function
typedef float (*getpixel_operator)(float*,int,int,int,int);
// Read pixel (i, j) of the w x h image I, clamping out-of-range coordinates
// to the nearest border pixel (nearest-value extrapolation, useful for
// Neumann boundary conditions).
static float getpixel_1(float *I, int w, int h, int i, int j)
{
    const int ci = (i < 0) ? 0 : ((i >= w) ? w - 1 : i);
    const int cj = (j < 0) ? 0 : ((j >= h) ? h - 1 : j);
    return I[ci + cj * w];
}
// evaluate the laplacian of image I at point i, j
// Standard 5-point stencil with nearest-value (clamped) boundary handling
// provided by getpixel_1.
static float laplacian(float *I, int w, int h, int i, int j)
{
getpixel_operator p = getpixel_1;
float r = -4 * p(I, w, h, i , j )
+ p(I, w, h, i+1, j )
+ p(I, w, h, i , j+1)
+ p(I, w, h, i-1, j )
+ p(I, w, h, i , j-1);
return r;
}
// evaluate the bilaplacian (laplacian of the laplacian) of image x at i, j,
// by applying the 5-point laplacian stencil to laplacian values.
static float bilaplacian(float *x, int w, int h, int i, int j)
{
float r = -4 * laplacian(x, w, h, i , j )
+ laplacian(x, w, h, i+1, j )
+ laplacian(x, w, h, i , j+1)
+ laplacian(x, w, h, i-1, j )
+ laplacian(x, w, h, i , j-1);
return r;
}
// evaluate the laplacian of image I at point i, j
// (alternative function, compatible with neumann boundaries)
// Non-finite (NAN) neighbours contribute nothing, implementing zero flux
// across holes in the domain.
static float laplacian_neum(float *I, int w, int h, int i, int j)
{
getpixel_operator p = getpixel_1;
float x = p(I, w, h, i , j );
float a = p(I, w, h, i+1, j );
float b = p(I, w, h, i , j+1);
float c = p(I, w, h, i-1, j );
float d = p(I, w, h, i , j-1);
float r = 0;
if (isfinite(a)) r += a -x;
if (isfinite(b)) r += b -x;
if (isfinite(c)) r += c -x;
if (isfinite(d)) r += d -x;
return r;
}
// perform one gauss-seidel iteration in-place on the data I
// A negative tstep selects the bilaplacian operator instead of the
// Neumann-compatible laplacian.  omega lists the n_omega pixels to update;
// f (may be NULL) supplies the Poisson data term.
static void gauss_seidel_iteration(float *I, float *f, int w, int h,
int (*omega)[2], int n_omega, float tstep)
{
getpixel_operator op = laplacian_neum;
if (tstep < 0)
op = bilaplacian;
//#pragma omp parallel for
for (int p = 0; p < n_omega; p++)
{
// In-place update: later pixels in the sweep see earlier updates
// (this is what makes it Gauss-Seidel rather than Jacobi).
int i = omega[p][0];
int j = omega[p][1];
int ij = j*w + i;
float l = op(I, w, h, i, j);
float d = f ? f[ij] : 0;
I[ij] = I[ij] + tstep * (l - d);
}
}
// build a mask of the NAN positions on image "x"
// the output "mask[i][2]" contains the two coordinates of the ith masked pixel
// The returned array is heap-allocated with room for w*h entries (only the
// first *out_nmask are used); the caller owns and must free it.
static int (*build_mask(int *out_nmask, float *x, int w, int h))[2]
{
// First pass: count the NAN pixels.
int nmask = 0;
for (int i = 0; i < w*h; i++)
if (isnan(x[i]))
nmask += 1;
int (*mask)[2] = xmalloc(w*h*2*sizeof(int)), cx = 0;
// Second pass: record (i, j) coordinates in row-major scan order.
for (int j = 0; j < h; j++)
for (int i = 0; i < w; i++)
if (isnan(x[j*w + i])) {
mask[cx][0] = i;
mask[cx][1] = j;
cx += 1;
}
assert(cx == nmask);
*out_nmask = nmask;
return mask;
}
// iterative poisson solver with initialization
// The region of interest is the set of NAN pixels in g; u is seeded with g
// where finite and with `initialization` inside the ROI, then smoothed with
// niter Gauss-Seidel sweeps restricted to the ROI.
static void poisson_extension_with_init(
float *u, // output image
float *f, // interior Poisson data
float *g, // input image with boundary data (NAN = holes)
int w, // image width
int h, // image height
float timestep, // time step for the numerical scheme
int niter, // number of iterations to run
float *initialization
)
{
// build the list of pixels inside the region of interest
int n_omega, (*omega)[2] = build_mask(&n_omega, g, w, h);
// initialize the solution to the given data outside the ROI
for (int i = 0; i < w*h; i++)
u[i] = isfinite(g[i]) ? g[i] : initialization[i];
// perform the requested iterations
for (int i = 0; i < niter; i++)
gauss_seidel_iteration(u, f, w, h, omega, n_omega, timestep);
// cleanup
free(omega);
}
// zoom-out by 2x2 block averages
// NANs are discarded when possible
// Output size (ow, oh) must be within one pixel of (iw/2, ih/2); when all
// four source samples of a block are NAN the output pixel is NAN.
static void zoom_out_by_factor_two(float *out, int ow, int oh,
float *in, int iw, int ih)
{
if (!out || !in) return;
getpixel_operator p = getpixel_1;
assert(abs(2*ow-iw) < 2);
assert(abs(2*oh-ih) < 2);
for (int j = 0; j < oh; j++)
for (int i = 0; i < ow; i++)
{
float a[4], m = 0;
a[0] = p(in, iw, ih, 2*i, 2*j);
a[1] = p(in, iw, ih, 2*i+1, 2*j);
a[2] = p(in, iw, ih, 2*i, 2*j+1);
a[3] = p(in, iw, ih, 2*i+1, 2*j+1);
// Average only the finite samples; cx counts them.
int cx = 0;
for (int k = 0; k < 4; k++)
if (isfinite(a[k])) {
m += a[k];
cx += 1;
}
out[ow*j + i] = cx ? m/cx : NAN;
}
}
// Bilinear interpolation inside a unit cell with corner values
// a = (0,0), b = (1,0), c = (0,1), d = (1,1), evaluated at (x, y) in [0,1]^2.
static float evaluate_bilinear_cell(float a, float b, float c, float d,
float x, float y)
{
    const float wx = 1 - x;
    const float wy = 1 - y;
    float r = 0;
    r += a * wx * wy;
    r += b * x * wy;
    r += c * wx * y;
    r += d * x * y;
    return r;
}
// evaluate an image at a sub-pixel position, using bilinear interpolation
// Corner samples are border-clamped via getpixel_1, so (p, q) may fall
// anywhere, including outside the image.
static float bilinear_interpolation(float *x, int w, int h, float p, float q)
{
int ip = floor(p); // note: a cast to int fails when p<0
int iq = floor(q);
// Four corner samples of the cell containing (p, q).
float a = getpixel_1(x, w, h, ip , iq );
float b = getpixel_1(x, w, h, ip+1, iq );
float c = getpixel_1(x, w, h, ip , iq+1);
float d = getpixel_1(x, w, h, ip+1, iq+1);
float r = evaluate_bilinear_cell(a, b, c, d, p-ip, q-iq);
return r;
}
// zoom-in by replicating pixels into 2x2 blocks
// no NAN's are expected in the input image
// Upsamples by 2 via bilinear interpolation at half-pixel offsets; output
// size must be within one pixel of twice the input size.
static void zoom_in_by_factor_two(float *out, int ow, int oh,
float *in, int iw, int ih)
{
if (!out || !in) return;
assert(abs(2*iw-ow) < 2);
assert(abs(2*ih-oh) < 2);
for (int j = 0; j < oh; j++)
for (int i = 0; i < ow; i++)
{
// Map the output pixel centre back into input coordinates.
float x = (i - 0.5) / 2;
float y = (j - 0.5) / 2;
out[ow*j+i] = bilinear_interpolation(in, iw, ih, x, y);
//out[ow*j+i] = getpixel_1(in, iw, ih, i/2, j/2);
}
}
#include "cleant_cgpois.c"
#include "smapa.h"
SMART_PARAMETER(PMSFAC,3)
SMART_PARAMETER(PONLIT,0)
// Multiscale recursive Poisson solver.
// Builds an initialization by solving a 2x-downsampled version of the
// problem (recursing down to `scale` levels), upsamples that solution, runs
// `niter` Gauss-Seidel sweeps at this level, then optionally refines with
// `cgit` conjugate-gradient iterations.
void poisson_rec(float *u, float *g, float *f, int w, int h,
float tstep, int niter, int scale, int cgit)
{
//fprintf(stderr, "PREC %dx%d (niter,scale,cgit)=(%d %d %d)\n",
// w, h, niter, scale, cgit);
float *init = xmalloc(w*h*sizeof*init);
if (scale > 1 && (w > 1 || h > 1))
{
// Recurse on the coarser problem and upsample its solution.
int ws = ceil(w/2.0);
int hs = ceil(h/2.0);
float *gs = xmalloc(ws * hs * sizeof*gs);
float *fs = f ? xmalloc(ws * hs * sizeof*fs) : f;
float *us = xmalloc(ws * hs * sizeof*us);
zoom_out_by_factor_two(gs, ws, hs, g, w, h);
zoom_out_by_factor_two(fs, ws, hs, f, w, h);
// Rescale the data term for the coarser grid spacing (PMSFAC tunable).
if (f) for (int i = 0; i < ws*hs; i++) fs[i] *= PMSFAC();
poisson_rec(us, gs, fs, ws, hs, tstep, niter, scale-1, cgit);
zoom_in_by_factor_two(init, w, h, us, ws, hs);
free(gs);
if(f) free(fs);
free(us);
} else {
// Coarsest level: start from zero.
for (int i = 0 ; i < w*h; i++)
init[i] = 0;
}
// Debug switch: PONLIT restricts iterations to one specific level width.
if (PONLIT() && PONLIT() != w)
niter = cgit = 0;
poisson_extension_with_init(u, f, g, w, h, tstep, niter, init);
free(init);
if (cgit) { // if requested, refine by Conjugate Gradient
float cg_eps = 1e-6;
poisson_extension_by_cg(u, g, f, w, h, u, cgit, cg_eps);
}
}
// Extension by Poisson equation of each channel of a color image:
// apply poisson_rec independently to each of the pd planes.
void poisson_solver_separable(float *out, float *in, float *dat,
int w, int h, int pd,
float tstep, int niter, int scale, float cgrad)
{
    const int plane = w * h;
    for (int ch = 0; ch < pd; ch++)
    {
        float *out_ch = out + plane * ch;
        float *in_ch = in + plane * ch;
        float *dat_ch = dat ? dat + plane * ch : dat;
        poisson_rec(out_ch, in_ch, dat_ch, w, h, tstep, niter, scale, cgrad);
    }
}
// extension by Poisson equation of each channel of a color image
// Simplified interface: fixed time step 0.25, full multiscale pyramid (99
// octaves), no conjugate-gradient refinement.
// fix: the niter parameter was declared but ignored (a hard-coded 10 was
// passed); it is now forwarded to poisson_rec as the name promises.
void poisson_solver_separable_simplest(float *out, float *in, float *dat,
int w, int h, int pd, int niter)
{
    for (int l = 0; l < pd; l++)
    {
        float *outl = out + w*h*l;
        float *inl = in + w*h*l;
        float *datl = dat ? dat + w*h*l : dat;
        poisson_rec(outl, inl, datl, w, h, .25, niter, 99, 0);
    }
}
//#define MAIN_IPOL_POISSON
#ifdef MAIN_IPOL_POISSON
#include "iio.h" // library for image input/output
#include "pickopt.c" // function for extracting named command line arguments
// Command-line front end for the multiscale Poisson/Laplace solver.
int main(int argc, char *argv[])
{
// extract named arguments
float tstep = atof(pick_option(&argc, &argv, "t", "0.25"));
float niter = atof(pick_option(&argc, &argv, "n", "10"));
float nscal = atof(pick_option(&argc, &argv, "s", "99"));
float cgrad = atof(pick_option(&argc, &argv, "c", "0"));
char *filename_i = pick_option(&argc, &argv, "i", "-"); // stdin
char *filename_o = pick_option(&argc, &argv, "o", "-"); // stdout
char *filename_m = pick_option(&argc, &argv, "m", "");
char *filename_f = pick_option(&argc, &argv, "f", "");
// if any arguments are left, print a help message and quit
if (argc > 1) {
fprintf(stderr, "Usage:\n\t%s [options]\n", *argv);
fprintf(stderr, "\nComputes approximate solutions of Poisson"
" or Laplace equations for images\n");
fprintf(stderr, "\nOptions with their default values:\n"
"\t-i stdin Input image with boundary data\n"
"\t-f (zeros) Optional image with Poisson data term\n"
"\t-m (zeros) Optional image with region of interest\n"
"\t-o stdout Output image\n"
"\t-t 0.25 Time step for Gauss-Seidel iterations\n"
"\t-n 10 Number of Gauss-Seidel iterations\n"
"\t-s 99 Number of Multi-Scale octaves\n"
"\t-c 0 Number of Conjugate Gradient iterations\n"
);
fprintf(stderr, "\nNote: NAN values in the input image"
" are added to the region of interest\n");
return 1;
}
// read input image ("boundary")
int w, h, pd;
float *img_i = NULL;
if (*filename_i)
img_i = iio_read_image_float_split(filename_i, &w, &h, &pd);
// if requested, read data image
float *img_f = NULL;
if (*filename_f) {
int w2, h2, pd2;
img_f = iio_read_image_float_split(filename_f, &w2, &h2, &pd2);
// Poisson data without a boundary image: solve over an all-NAN domain.
if (!img_i) {
w = w2; h = h2; pd = pd2;
img_i = xmalloc(w * h * pd * sizeof*img_i);
for (int i = 0; i < w*h*pd; i++) img_i[i] = NAN;
}
else if (w != w2 || h != h2 || pd != pd2)
return fprintf(stderr, "input sizes mismatch (i,f)");
}
// if requested, read mask image
float *img_m = NULL;
if (*filename_m) {
int w2, h2;
img_m = iio_read_image_float(filename_m, &w2, &h2);
if (w != w2 || h != h2)
return fprintf(stderr, "input sizes mismatch (i,m)");
}
// alloc space for output image
float *out = xmalloc(w * h * pd * sizeof*out);
// apply mask, if it exists
// Positive mask pixels are punched into the ROI by setting them to NAN;
// the mask is single-channel, hence the modulo indexing per channel.
if (img_m)
for (int i = 0; i < w * h * pd; i++)
if (img_m[i % (w*h)] > 0)
img_i[i] = NAN;
// run the algorithm
// NOTE(review): niter/nscal/cgrad are parsed as float but consumed as
// integer parameters here (values truncate) — confirm intended.
poisson_solver_separable(out, img_i, img_f, w, h, pd,
tstep, niter, nscal, cgrad);
// save the output image
iio_save_image_float_split(filename_o, out, w, h, pd);
// cleanup and exit
free(out);
free(img_i);
if (img_m) free(img_m);
if (img_f) free(img_f);
return 0;
}
#endif//MAIN_IPOL_POISSON
|
GB_unaryop__ainv_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_fp32
// op(A') function: GB_tran__ainv_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies cij = -aij element-wise (float -> float) over anz entries, using a
// static OpenMP schedule across nthreads threads.  Cx and Ax may alias
// because each iteration reads and writes only index p.
GrB_Info GB_unop__ainv_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose loop itself lives in the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined above
// (this is phase 2 of the two-phase transpose).
GrB_Info GB_tran__ainv_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
parallel_for.h | /*!
* Copyright (c) 2021 by Contributors
* \file runtime/container.h
* \brief Defines the container object data structures.
*/
#ifndef DGL_RUNTIME_PARALLEL_FOR_H_
#define DGL_RUNTIME_PARALLEL_FOR_H_
#include <dmlc/omp.h>
#include <algorithm>
#include <string>
#include <cstdlib>
#include <exception>
#include <vector>
#include <atomic>
namespace {
// Ceiling integer division: how many chunks of size `denominator` are needed
// to cover `numerator` elements.
int64_t divup(int64_t numerator, int64_t denominator) {
  return (numerator + denominator - 1) / denominator;
}
}  // namespace
namespace dgl {
namespace runtime {
namespace {
// Decide how many OpenMP threads to use for the range [begin, end):
// 1 when already inside a parallel region, when the whole range fits in a
// single grain, or when there is only one element; otherwise
// min(omp_get_max_threads(), ceil(range / grain_size)).
size_t compute_num_threads(size_t begin, size_t end, size_t grain_size) {
if (omp_in_parallel() || end - begin <= grain_size || end - begin == 1)
return 1;
return std::min(static_cast<int64_t>(omp_get_max_threads()), divup(end - begin, grain_size));
}
// Process-wide default grain size for parallel_for: 1 unless overridden by
// the DGL_PARALLEL_FOR_GRAIN_SIZE environment variable (read once at
// construction).
struct DefaultGrainSizeT {
  size_t grain_size;

  DefaultGrainSizeT() {
    const char *env = std::getenv("DGL_PARALLEL_FOR_GRAIN_SIZE");
    grain_size = env ? std::stoul(env) : 1;
  }

  // Returns the configured grain size.
  size_t operator()() {
    return grain_size;
  }
};
} // namespace
static DefaultGrainSizeT default_grain_size;
/*!
 * \brief OpenMP-based parallel for loop.
 *
 * It requires each thread's workload to have at least \a grain_size elements.
 * The loop body will be a function that takes in two arguments \a begin and \a end, which
 * stands for the starting (inclusive) and ending index (exclusive) of the workload.
 * Exceptions thrown inside \a f are captured (first one wins) and rethrown
 * on the calling thread; without OpenMP the range runs serially.
 */
template <typename F>
void parallel_for(
const size_t begin,
const size_t end,
const size_t grain_size,
F&& f) {
if (begin >= end) {
return;
}
#ifdef _OPENMP
auto num_threads = compute_num_threads(begin, end, grain_size);
// (BarclayII) the exception code is borrowed from PyTorch.
std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
std::exception_ptr eptr;
#pragma omp parallel num_threads(num_threads)
{
// Static partition: each thread takes one contiguous chunk of
// ceil(range / num_threads) indices.
auto tid = omp_get_thread_num();
auto chunk_size = divup((end - begin), num_threads);
auto begin_tid = begin + tid * chunk_size;
if (begin_tid < end) {
auto end_tid = std::min(end, chunk_size + begin_tid);
try {
f(begin_tid, end_tid);
} catch (...) {
// Only the first thread to fail records its exception.
if (!err_flag.test_and_set())
eptr = std::current_exception();
}
}
}
if (eptr)
std::rethrow_exception(eptr);
#else
f(begin, end);
#endif
}
/*!
 * \brief OpenMP-based parallel for loop with default grain size.
 *
 * parallel_for with grain size to default value, either 1 or controlled through
 * environment variable DGL_PARALLEL_FOR_GRAIN_SIZE.
 * If grain size is set to 1, the function behaves the same way as OpenMP
 * parallel for pragma with static scheduling.
 */
template <typename F>
void parallel_for(
const size_t begin,
const size_t end,
F&& f) {
// Delegate to the three-argument overload with the process-wide default.
parallel_for(begin, end, default_grain_size(), std::forward<F>(f));
}
/*!
* \brief OpenMP-based two-stage parallel reduction.
*
* The first-stage reduction function \a f works in parallel. Each thread's workload has
* at least \a grain_size elements. The loop body will be a function that takes in
* the starting index (inclusive), the ending index (exclusive), and the reduction identity.
*
* The second-stage reduction function \a sf is a binary function working in the main
* thread. It aggregates the partially reduced result computed from each thread.
*
* Example to compute a parallelized max reduction of an array \c a:
*
* parallel_reduce(
* 0, // starting index
* 100, // ending index
* 1, // grain size
 * -std::numeric_limits<float>::infinity(), // identity
* [&a] (int begin, int end, float ident) { // first-stage partial reducer
* float result = ident;
* for (int i = begin; i < end; ++i)
* result = std::max(result, a[i]);
* return result;
* },
* [] (float result, float partial_result) {
* return std::max(result, partial_result);
* });
*/
template <typename DType, typename F, typename SF>
DType parallel_reduce(
    const size_t begin,
    const size_t end,
    const size_t grain_size,
    const DType ident,
    const F& f,
    const SF& sf) {
  // Empty range reduces to the identity.
  if (begin >= end) {
    return ident;
  }
  // NOTE(review): compute_num_threads returns size_t but is narrowed to int
  // here; also, unlike parallel_for above, there is no non-OpenMP fallback —
  // presumably this header is only compiled with OpenMP enabled. Confirm.
  int num_threads = compute_num_threads(begin, end, grain_size);
  // Single-thread case: reduce directly on the calling thread.
  if (num_threads == 1) {
    return f(begin, end, ident);
  }
  // One partial result slot per thread, initialized to the identity so that
  // threads with an empty chunk contribute nothing to the final reduction.
  std::vector<DType> results(num_threads, ident);
  // First exception thrown by any chunk wins and is rethrown afterwards
  // (same scheme as parallel_for above, borrowed from PyTorch).
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
#pragma omp parallel num_threads(num_threads)
  {
    auto tid = omp_get_thread_num();
    auto chunk_size = divup((end - begin), num_threads);
    auto begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {
      auto end_tid = std::min(end, static_cast<size_t>(chunk_size + begin_tid));
      try {
        results[tid] = f(begin_tid, end_tid, ident);
      } catch (...) {
        if (!err_flag.test_and_set())
          eptr = std::current_exception();
      }
    }
  }
  if (eptr)
    std::rethrow_exception(eptr);
  // Second stage: fold the per-thread partial results serially with sf.
  DType out = ident;
  for (int64_t i = 0; i < num_threads; ++i)
    out = sf(out, results[i]);
  return out;
}
} // namespace runtime
} // namespace dgl
#endif // DGL_RUNTIME_PARALLEL_FOR_H_
|
boundary_matrix.h | /* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include <phat/helpers/misc.h>
#include <phat/representations/bit_tree_pivot_column.h>
// interface class for the main data structure -- implementations of the interface can be found in ./representations
namespace phat {
// Interface class for PHAT's main data structure. The Representation template
// parameter supplies storage and per-column operations; this wrapper adds
// representation-independent helpers, comparison, cross-representation
// assignment, and ascii/binary I/O.
template< class Representation = bit_tree_pivot_column >
class boundary_matrix
{
protected:
    Representation rep;

// interface functions -- actual implementation and complexity depends on chosen @Representation template
public:
    // get overall number of columns in boundary_matrix
    index get_num_cols() const { return rep._get_num_cols(); }

    // set overall number of columns in boundary_matrix
    void set_num_cols( index nr_of_columns ) { rep._set_num_cols( nr_of_columns ); }

    // get dimension of given index
    dimension get_dim( index idx ) const { return rep._get_dim( idx ); }

    // set dimension of given index
    void set_dim( index idx, dimension dim ) { rep._set_dim( idx, dim ); }

    // replaces content of @col with boundary of given index
    void get_col( index idx, column& col ) const { col.clear(); rep._get_col( idx, col ); }

    // set column @idx to the values contained in @col
    void set_col( index idx, const column& col ) { rep._set_col( idx, col ); }

    // true iff boundary of given column is empty
    bool is_empty( index idx ) const { return rep._is_empty( idx ); }

    // largest index of given column (new name for lowestOne()) -- NOT thread-safe
    index get_max_index( index idx ) const { return rep._get_max_index( idx ); }

    // removes maximal index from given column
    void remove_max( index idx ) { rep._remove_max( idx ); }

    // adds column @source to column @target'
    void add_to( index source, index target ) { rep._add_to( source, target ); }

    // clears given column
    void clear( index idx ) { rep._clear( idx ); }

    // finalizes given column
    void finalize( index idx ) { rep._finalize( idx ); }

    // syncronizes all internal data structures -- has to be called before and after any multithreaded access!
    void sync() { rep._sync(); }

// info functions -- independent of chosen 'Representation'
public:
    // maximal dimension over all columns (0 for an empty matrix)
    dimension get_max_dim() const {
        dimension cur_max_dim = 0;
        for( index idx = 0; idx < get_num_cols(); idx++ )
            cur_max_dim = get_dim( idx ) > cur_max_dim ? get_dim( idx ) : cur_max_dim;
        return cur_max_dim;
    }

    // number of nonzero rows for given column @idx
    index get_num_rows( index idx ) const {
        column cur_col;
        get_col( idx, cur_col );
        return cur_col.size();
    }

    // maximal number of nonzero rows of all columns (-1 for an empty matrix)
    index get_max_col_entries() const {
        index max_col_entries = -1;
        const index nr_of_columns = get_num_cols();
        for( index idx = 0; idx < nr_of_columns; idx++ )
            max_col_entries = get_num_rows( idx ) > max_col_entries ? get_num_rows( idx ) : max_col_entries;
        return max_col_entries;
    }

    // maximal number of nonzero cols of all rows
    // (builds an explicit transpose, so this is O(number of entries) in time and space)
    index get_max_row_entries() const {
        size_t max_row_entries = 0;
        const index nr_of_columns = get_num_cols();
        std::vector< std::vector< index > > transposed_matrix( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            get_col( cur_col, temp_col );
            for( index idx = 0; idx < (index)temp_col.size(); idx++)
                transposed_matrix[ temp_col[ idx ] ].push_back( cur_col );
        }
        for( index idx = 0; idx < nr_of_columns; idx++ )
            max_row_entries = transposed_matrix[ idx ].size() > max_row_entries ? transposed_matrix[ idx ].size() : max_row_entries;
        return max_row_entries;
    }

    // overall number of entries in the matrix
    index get_num_entries() const {
        index number_of_nonzero_entries = 0;
        const index nr_of_columns = get_num_cols();
        for( index idx = 0; idx < nr_of_columns; idx++ )
            number_of_nonzero_entries += get_num_rows( idx );
        return number_of_nonzero_entries;
    }

// operators / constructors
public:
    boundary_matrix() {}

    // copy-construct from a matrix with a (possibly different) representation
    template< class OtherRepresentation >
    boundary_matrix( const boundary_matrix< OtherRepresentation >& other ) {
        *this = other;
    }

    // equality: same number of columns and, column by column, same content and dimension
    template< typename OtherRepresentation >
    bool operator==( const boundary_matrix< OtherRepresentation >& other_boundary_matrix ) const {
        const index number_of_columns = this->get_num_cols();

        if( number_of_columns != other_boundary_matrix.get_num_cols() )
            return false;

        column temp_col;
        column other_temp_col;
        for( index idx = 0; idx < number_of_columns; idx++ ) {
            this->get_col( idx, temp_col );
            other_boundary_matrix.get_col( idx, other_temp_col );
            if( temp_col != other_temp_col || this->get_dim( idx ) != other_boundary_matrix.get_dim( idx ) )
                return false;
        }
        return true;
    }

    template< typename OtherRepresentation >
    bool operator!=( const boundary_matrix< OtherRepresentation >& other_boundary_matrix ) const {
        return !( *this == other_boundary_matrix );
    }

    // copy-assignment across representations, column by column
    template< typename OtherRepresentation >
    boundary_matrix< Representation >& operator=( const boundary_matrix< OtherRepresentation >& other )
    {
        const index nr_of_columns = other.get_num_cols();
        this->set_num_cols( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            this->set_dim( cur_col, other.get_dim( cur_col ) );
            other.get_col( cur_col, temp_col );
            this->set_col( cur_col, temp_col );
        }

        // by convention, always return *this
        return *this;
    }

// I/O -- independent of chosen 'Representation'
public:
    // initializes boundary_matrix from (vector<vector>, vector) pair -- untested
    template< typename index_type, typename dimemsion_type >
    void load_vector_vector( const std::vector< std::vector< index_type > >& input_matrix, const std::vector< dimemsion_type >& input_dims ) {
        const index nr_of_columns = (index)input_matrix.size();
        this->set_num_cols( nr_of_columns );
        column temp_col;
        // each thread gets its own private temp_col
        #pragma omp parallel for private( temp_col )
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            this->set_dim( cur_col, (dimension)input_dims[ cur_col ] );

            index num_rows = input_matrix[ cur_col ].size();
            temp_col.resize( num_rows );
            for( index cur_row = 0; cur_row < num_rows; cur_row++ )
                temp_col[ cur_row ] = (index)input_matrix[ cur_col ][ cur_row ];
            this->set_col( cur_col, temp_col );
        }
    }

    // writes the matrix into a (vector<vector>, vector) pair
    template< typename index_type, typename dimemsion_type >
    void save_vector_vector( std::vector< std::vector< index_type > >& output_matrix, std::vector< dimemsion_type >& output_dims ) {
        const index nr_of_columns = get_num_cols();
        output_matrix.resize( nr_of_columns );
        output_dims.resize( nr_of_columns );
        column temp_col;
        for( index cur_col = 0; cur_col < nr_of_columns; cur_col++ ) {
            output_dims[ cur_col ] = (dimemsion_type)get_dim( cur_col );
            get_col( cur_col, temp_col );
            index num_rows = temp_col.size();
            output_matrix[ cur_col ].clear();
            output_matrix[ cur_col ].resize( num_rows );
            for( index cur_row = 0; cur_row < num_rows; cur_row++ )
                output_matrix[ cur_col ][ cur_row ] = (index_type)temp_col[ cur_row ];
        }
    }

    // Loads the boundary_matrix from given file in ascii format
    // Format: each line represents a column, first number is dimension, other numbers are the content of the column.
    // Ignores empty lines and lines starting with a '#'.
    bool load_ascii( std::string filename ) {
        // first count number of columns:
        std::string cur_line;
        std::ifstream dummy( filename .c_str() );
        if( dummy.fail() )
            return false;

        index number_of_columns = 0;
        while( getline( dummy, cur_line ) ) {
            cur_line.erase(cur_line.find_last_not_of(" \t\n\r\f\v") + 1);
            if( cur_line != "" && cur_line[ 0 ] != '#' )
                number_of_columns++;

        }
        this->set_num_cols( number_of_columns );
        dummy.close();

        std::ifstream input_stream( filename.c_str() );
        if( input_stream.fail() )
            return false;

        column temp_col;
        index cur_col = -1;
        while( getline( input_stream, cur_line ) ) {
            cur_line.erase(cur_line.find_last_not_of(" \t\n\r\f\v") + 1);
            if( cur_line != "" && cur_line[ 0 ] != '#' ) {
                cur_col++;
                std::stringstream ss( cur_line );

                int64_t temp_dim;
                ss >> temp_dim;
                this->set_dim( cur_col, (dimension) temp_dim );

                int64_t temp_index;
                temp_col.clear();
                // Test the stream state *after* each extraction instead of
                // ss.good() before it; the old ss.good() loop appended a
                // stale/zero value when a line ended in trailing whitespace.
                while( ss >> temp_index )
                    temp_col.push_back( (index)temp_index );
                std::sort( temp_col.begin(), temp_col.end() );
                this->set_col( cur_col, temp_col );
            }
        }
        input_stream.close();
        return true;
    }

    // Saves the boundary_matrix to given file in ascii format
    // Format: each line represents a column, first number is dimension, other numbers are the content of the column
    bool save_ascii( std::string filename ) {
        std::ofstream output_stream( filename.c_str() );
        if( output_stream.fail() )
            return false;

        const index nr_columns = this->get_num_cols();
        column tempCol;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            output_stream << (int64_t)this->get_dim( cur_col );
            this->get_col( cur_col, tempCol );
            for( index cur_row_idx = 0; cur_row_idx < (index)tempCol.size(); cur_row_idx++ )
                output_stream << " " << tempCol[ cur_row_idx ];
            output_stream << std::endl;
        }

        output_stream.close();
        return true;
    }

    // Loads boundary_matrix from given file
    // Format: nr_columns % dim1 % N1 % row1 row2 % ...% rowN1 % dim2 % N2 % ...
    bool load_binary( std::string filename )
    {
        std::ifstream input_stream( filename.c_str( ), std::ios_base::binary | std::ios_base::in );
        if( input_stream.fail( ) )
            return false;

        int64_t nr_columns;
        input_stream.read( (char*)&nr_columns, sizeof( int64_t ) );
        this->set_num_cols( (index)nr_columns );

        column temp_col;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            int64_t cur_dim;
            input_stream.read( (char*)&cur_dim, sizeof( int64_t ) );
            this->set_dim( cur_col, (dimension)cur_dim );

            int64_t nr_rows;
            input_stream.read( (char*)&nr_rows, sizeof( int64_t ) );
            temp_col.resize( ( std::size_t )nr_rows );
            for( index idx = 0; idx < nr_rows; idx++ ) {
                int64_t cur_row;
                input_stream.read( (char*)&cur_row, sizeof( int64_t ) );
                temp_col[ idx ] = (index)cur_row;
            }
            this->set_col( cur_col, temp_col );
        }

        input_stream.close( );
        return true;
    }

    // Saves the boundary_matrix to given file in binary format
    // Format: nr_columns % dim1 % N1 % row1 row2 % ...% rowN1 % dim2 % N2 % ...
    bool save_binary( std::string filename )
    {
        std::ofstream output_stream( filename.c_str( ), std::ios_base::binary | std::ios_base::out );
        if( output_stream.fail( ) )
            return false;

        const int64_t nr_columns = this->get_num_cols( );
        output_stream.write( (char*)&nr_columns, sizeof( int64_t ) );
        column tempCol;
        for( index cur_col = 0; cur_col < nr_columns; cur_col++ ) {
            int64_t cur_dim = this->get_dim( cur_col );
            output_stream.write( (char*)&cur_dim, sizeof( int64_t ) );
            this->get_col( cur_col, tempCol );
            int64_t cur_nr_rows = tempCol.size( );
            output_stream.write( (char*)&cur_nr_rows, sizeof( int64_t ) );
            for( index cur_row_idx = 0; cur_row_idx < (index)tempCol.size( ); cur_row_idx++ ) {
                int64_t cur_row = tempCol[ cur_row_idx ];
                output_stream.write( (char*)&cur_row, sizeof( int64_t ) );
            }
        }

        output_stream.close( );
        return true;
    }
};
}
|
SplineHybridAdoptorReaderP.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
// Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
/** @file SplineHybridAdoptorReader.h
*
* derived from SplineAdoptorReader
*/
#ifndef QMCPLUSPLUS_EINSPLINE_HYBRID_ADOPTOR_READERP_H
#define QMCPLUSPLUS_EINSPLINE_HYBRID_ADOPTOR_READERP_H
#include <Numerics/Quadrature.h>
#include <Numerics/Bessel.h>
#include <QMCWaveFunctions/BsplineFactory/HybridAdoptorBase.h>
#include "OhmmsData/AttributeSet.h"
//#include <QMCHamiltonians/Ylm.h>
//#define PRINT_RADIAL
namespace qmcplusplus
{
/** Holds a contiguous slice [first, last) of the plane-wave G-vectors in
 *  Cartesian form, plus their magnitudes, and provides the per-G quantities
 *  (Ylm(G), spherical Bessel j_l(|G| r), phase factors) needed to transform
 *  plane-wave coefficients into atomic-orbital radial functions.
 */
template<typename ST, typename LT>
struct Gvectors
{
  typedef TinyVector<ST, 3> PosType;
  typedef std::complex<ST> ValueType;

  const LT& Lattice;
  std::vector<PosType> gvecs_cart; //Cartesian.
  std::vector<ST> gmag;            // |G| for each stored vector
  const size_t NumGvecs;           // number of G-vectors in this slice

  /// Convert the integer G-vectors (shifted by HalfG * 0.5) in [first, last)
  /// to Cartesian via the lattice, and cache their magnitudes.
  Gvectors(const std::vector<TinyVector<int, 3>>& gvecs_in,
           const LT& Lattice_in,
           const TinyVector<int, 3>& HalfG,
           size_t first,
           size_t last)
      : Lattice(Lattice_in), NumGvecs(last - first)
  {
    gvecs_cart.resize(NumGvecs);
    gmag.resize(NumGvecs);
#pragma omp parallel for
    for (size_t ig = 0; ig < NumGvecs; ig++)
    {
      TinyVector<ST, 3> gvec_shift;
      gvec_shift = gvecs_in[ig + first] + HalfG * 0.5;
      gvecs_cart[ig] = Lattice.k_cart(gvec_shift);
      gmag[ig] = std::sqrt(dot(gvecs_cart[ig], gvecs_cart[ig]));
    }
  }

  /// Evaluate the spherical harmonics along Ghat = G/|G|.
  /// For G = 0 the z axis is used as the direction.
  template<typename YLM_ENGINE, typename VVT>
  void calc_Ylm_G(const size_t ig, YLM_ENGINE& Ylm, VVT& YlmG) const
  {
    PosType Ghat(0.0, 0.0, 1.0);
    if (gmag[ig] > 0)
      Ghat = gvecs_cart[ig] / gmag[ig];
    Ylm.evaluateV(Ghat[0], Ghat[1], Ghat[2], YlmG.data());
  }

  /// Compute spherical Bessel values j_l(|G| r) for l = 0..lmax, then copy
  /// j_l into every lm slot of the corresponding l shell (done in descending
  /// l so the leading j_l values are not overwritten before being broadcast).
  template<typename VVT>
  inline void calc_jlm_G(const int lmax, ST& r, const size_t ig, VVT& j_lm_G) const
  {
    bessel_steed_array_cpu(lmax, gmag[ig] * r, j_lm_G.data());
    for (size_t l = lmax; l > 0; l--)
      for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++)
        j_lm_G[lm] = j_lm_G[l];
  }

  /// Compute cos(G.r) and sin(G.r) for every center position in RSoA.
  /// Note: sincos writes the sine into v_i (imaginary part) and the cosine
  /// into v_r (real part) of the phase factor e^{i G.r}.
  template<typename PT, typename VT>
  inline void calc_phase_shift(const PT& RSoA, const size_t ig, VT& phase_shift_real, VT& phase_shift_imag) const
  {
    const ST* restrict px = RSoA.data(0);
    const ST* restrict py = RSoA.data(1);
    const ST* restrict pz = RSoA.data(2);
    ST* restrict v_r = phase_shift_real.data();
    ST* restrict v_i = phase_shift_imag.data();
    const ST& gv_x = gvecs_cart[ig][0];
    const ST& gv_y = gvecs_cart[ig][1];
    const ST& gv_z = gvecs_cart[ig][2];

#pragma omp simd aligned(px, py, pz, v_r, v_i)
    for (size_t iat = 0; iat < RSoA.size(); iat++)
      sincos(px[iat] * gv_x + py[iat] * gv_y + pz[iat] * gv_z, v_i + iat, v_r + iat);
  }

  /// Evaluate psi(r) = sum_G cG[G] e^{i G.r} at a single point.
  template<typename PT>
  ValueType evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos)
  {
    assert(cG.size() == NumGvecs);
    std::complex<ST> val(0.0, 0.0);
    for (size_t ig = 0; ig < NumGvecs; ig++)
    {
      ST s, c;
      sincos(dot(gvecs_cart[ig], pos), &s, &c);
      ValueType pw0(c, s);
      val += cG[ig] * pw0;
    }
    return val;
  }

  /// Evaluate psi(r) and its Laplacian (each plane wave contributes a factor
  /// of -|G|^2 to the Laplacian) at a single point.
  template<typename PT>
  void evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos, ValueType& phi, ValueType& d2phi)
  {
    assert(cG.size() == NumGvecs);
    d2phi = phi = 0.0;
    for (size_t ig = 0; ig < NumGvecs; ig++)
    {
      ST s, c;
      sincos(dot(gvecs_cart[ig], pos), &s, &c);
      ValueType pw0(c, s);
      phi += cG[ig] * pw0;
      d2phi += cG[ig] * pw0 * (-dot(gvecs_cart[ig], gvecs_cart[ig]));
    }
  }

  /// Kinetic energy of the band: (1/2) sum_G |G|^2 |cG[G]|^2.
  double evaluate_KE(const Vector<std::complex<double>>& cG)
  {
    assert(cG.size() == NumGvecs);
    double KE = 0;
    for (size_t ig = 0; ig < NumGvecs; ig++)
      KE += dot(gvecs_cart[ig], gvecs_cart[ig]) * (cG[ig].real() * cG[ig].real() + cG[ig].imag() * cG[ig].imag());
    return KE / 2.0;
  }
};
/** General SplineHybridAdoptorReader to handle any unitcell
*/
template<typename SA>
struct SplineHybridAdoptorReader : public SplineAdoptorReader<SA>
{
  typedef SplineAdoptorReader<SA> BaseReader;

  using BaseReader::bspline;
  using BaseReader::mybuilder;
  using BaseReader::rotate_phase_i;
  using BaseReader::rotate_phase_r;
  using typename BaseReader::DataType;

  SplineHybridAdoptorReader(EinsplineSetBuilder* e) : BaseReader(e) {}

  /** initialize basic parameters of atomic orbitals
   *
   * Reads the smoothing scheme/function from the XML input, then validates
   * and (where missing) derives per-center parameters: cutoff_radius,
   * inner_cutoff, lmax, spline_radius and spline_npoints. Aborts on any
   * inconsistent input. Finally constructs one AtomicOrbitalSoA per center
   * (only if the center list has not been initialized yet).
   */
  void initialize_hybridrep_atomic_centers() override
  {
    OhmmsAttributeSet a;
    std::string scheme_name("Consistent");
    std::string s_function_name("LEKS2018");
    a.add(scheme_name, "smoothing_scheme");
    a.add(s_function_name, "smoothing_function");
    a.put(mybuilder->XMLRoot);
    // assign smooth_scheme
    if ( scheme_name == "Consistent" )
      bspline->smooth_scheme = SA::smoothing_schemes::CONSISTENT;
    else if ( scheme_name == "SmoothAll" )
      bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHALL;
    else if ( scheme_name == "SmoothPartial" )
      bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHPARTIAL;
    else
      APP_ABORT("initialize_hybridrep_atomic_centers wrong smoothing_scheme name! Only allows Consistent, SmoothAll or SmoothPartial.");
    // assign smooth_function
    if ( s_function_name == "LEKS2018" )
      bspline->smooth_func_id = smoothing_functions::LEKS2018;
    else if ( s_function_name == "coscos" )
      bspline->smooth_func_id = smoothing_functions::COSCOS;
    else if ( s_function_name == "linear" )
      bspline->smooth_func_id = smoothing_functions::LINEAR;
    else
      APP_ABORT("initialize_hybridrep_atomic_centers wrong smoothing_function name! Only allows LEKS2018, coscos or linear.");
    app_log() << "Hybrid orbital representation uses " << scheme_name << " smoothing scheme and " << s_function_name << " smoothing function." << std::endl;

    bspline->set_info(*(mybuilder->SourcePtcl), mybuilder->TargetPtcl, mybuilder->Super2Prim);
    auto& centers = bspline->AtomicCenters;
    auto& ACInfo = mybuilder->AtomicCentersInfo;
    // load atomic center info only when it is not initialized
    if (centers.size() == 0)
    {
      bool success = true;
      app_log() << "Reading atomic center info for hybrid representation" << std::endl;
      for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++)
      {
        const int my_GroupID = ACInfo.GroupID[center_idx];
        // cutoff_radius is mandatory for every center
        if (ACInfo.cutoff[center_idx] < 0)
        {
          app_error() << "Hybrid orbital representation needs parameter 'cutoff_radius' for atom " << center_idx
                      << std::endl;
          success = false;
        }
        // derive inner_cutoff (cutoff - 0.3, clamped at 0) when unspecified
        if (ACInfo.inner_cutoff[center_idx] < 0)
        {
          const double inner_cutoff = std::max(ACInfo.cutoff[center_idx] - 0.3, 0.0);
          app_log() << "Hybrid orbital representation setting 'inner_cutoff' to " << inner_cutoff << " for group "
                    << my_GroupID << " as atom " << center_idx << std::endl;
          // overwrite the inner_cutoff of all the atoms of the same species
          for (int id = 0; id < ACInfo.Ncenters; id++)
            if (my_GroupID == ACInfo.GroupID[id])
              ACInfo.inner_cutoff[id] = inner_cutoff;
        }
        else if (ACInfo.inner_cutoff[center_idx] > ACInfo.cutoff[center_idx])
        {
          app_error() << "Hybrid orbital representation 'inner_cutoff' must be smaller than 'spline_radius' for atom "
                      << center_idx << std::endl;
          success = false;
        }

        if (ACInfo.cutoff[center_idx] > 0)
        {
          if (ACInfo.lmax[center_idx] < 0)
          {
            app_error() << "Hybrid orbital representation needs parameter 'lmax' for atom " << center_idx << std::endl;
            success = false;
          }

          // derive spline_radius/spline_npoints from a default grid spacing
          // when neither is specified; the same values are propagated to all
          // atoms of the same species
          if (ACInfo.spline_radius[center_idx] < 0 && ACInfo.spline_npoints[center_idx] < 0)
          {
            app_log() << "Parameters 'spline_radius' and 'spline_npoints' for group " << my_GroupID << " as atom "
                      << center_idx << " are not specified." << std::endl;
            const double delta = std::min(0.02, ACInfo.cutoff[center_idx] / 4.0);
            const int n_grid_point = std::ceil((ACInfo.cutoff[center_idx] + 1e-4) / delta) + 3;
            for (int id = 0; id < ACInfo.Ncenters; id++)
              if (my_GroupID == ACInfo.GroupID[id])
              {
                ACInfo.spline_npoints[id] = n_grid_point;
                ACInfo.spline_radius[id] = (n_grid_point - 1) * delta;
              }
            app_log() << " Based on default grid point distance " << delta << std::endl;
            app_log() << " Setting 'spline_npoints' to " << ACInfo.spline_npoints[center_idx] << std::endl;
            app_log() << " Setting 'spline_radius' to " << ACInfo.spline_radius[center_idx] << std::endl;
          }
          else
          {
            if (ACInfo.spline_radius[center_idx] < 0)
            {
              app_error() << "Hybrid orbital representation needs parameter 'spline_radius' for atom " << center_idx
                          << std::endl;
              success = false;
            }

            if (ACInfo.spline_npoints[center_idx] < 0)
            {
              app_error() << "Hybrid orbital representation needs parameter 'spline_npoints' for atom " << center_idx
                          << std::endl;
              success = false;
            }
          }

          // check maximally allowed cutoff_radius
          double max_allowed_cutoff = ACInfo.spline_radius[center_idx] -
              2.0 * ACInfo.spline_radius[center_idx] / (ACInfo.spline_npoints[center_idx] - 1);
          if (success && ACInfo.cutoff[center_idx] > max_allowed_cutoff)
          {
            app_error() << "Hybrid orbital representation requires cutoff_radius<=" << max_allowed_cutoff
                        << " calculated by spline_radius-2*spline_radius/(spline_npoints-1) for atom " << center_idx
                        << std::endl;
            success = false;
          }
        }
        else
        {
          // no atomic regions for this atom type
          ACInfo.spline_radius[center_idx] = 0.0;
          ACInfo.spline_npoints[center_idx] = 0;
          ACInfo.lmax[center_idx] = 0;
        }
      }
      if (!success)
        BaseReader::myComm->barrier_and_abort("initialize_hybridrep_atomic_centers Failed to initialize atomic centers in hybrid orbital representation!");

      // build one atomic-orbital container per center from the validated info
      for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++)
      {
        AtomicOrbitalSoA<DataType> oneCenter(ACInfo.lmax[center_idx]);
        oneCenter.set_info(ACInfo.ion_pos[center_idx], ACInfo.cutoff[center_idx], ACInfo.inner_cutoff[center_idx],
                           ACInfo.spline_radius[center_idx], ACInfo.non_overlapping_radius[center_idx],
                           ACInfo.spline_npoints[center_idx]);
        centers.push_back(oneCenter);
      }
    }
  }

  /** initialize construct atomic orbital radial functions from plane waves
   *
   * For one band (iorb): broadcast the plane-wave coefficients cG, split the
   * G-vectors over processor groups, accumulate per-center radial functions
   * f_lm(r) on a radial grid (thread-local buffers reduced across threads and
   * then across MPI ranks), and finally fit 1D splines per lm channel on the
   * group leader.
   */
  inline void create_atomic_centers_Gspace(Vector<std::complex<double>>& cG,
                                           Communicate& band_group_comm,
                                           int iorb) override
  {
    band_group_comm.bcast(rotate_phase_r);
    band_group_comm.bcast(rotate_phase_i);
    band_group_comm.bcast(cG);
    //distribute G-vectors over processor groups
    const int Ngvecs = mybuilder->Gvecs[0].size();
    const int Nprocs = band_group_comm.size();
    const int Ngvecgroups = std::min(Ngvecs, Nprocs);
    Communicate gvec_group_comm(band_group_comm, Ngvecgroups);
    std::vector<int> gvec_groups(Ngvecgroups + 1, 0);
    FairDivideLow(Ngvecs, Ngvecgroups, gvec_groups);
    const int gvec_first = gvec_groups[gvec_group_comm.getGroupID()];
    const int gvec_last = gvec_groups[gvec_group_comm.getGroupID() + 1];

    // prepare Gvecs Ylm(G)
    typedef typename EinsplineSetBuilder::UnitCellType UnitCellType;
    Gvectors<double, UnitCellType> Gvecs(mybuilder->Gvecs[0], mybuilder->PrimCell, bspline->HalfG, gvec_first,
                                         gvec_last);
    // if(band_group_comm.isGroupLeader()) std::cout << "print band=" << iorb << " KE=" << Gvecs.evaluate_KE(cG) << std::endl;

    std::vector<AtomicOrbitalSoA<DataType>>& centers = bspline->AtomicCenters;
    app_log() << "Transforming band " << iorb << " on Rank 0" << std::endl;
    // collect the distinct species (GroupIDs) among the atomic centers
    std::vector<int> uniq_species;
    for (int center_idx = 0; center_idx < centers.size(); center_idx++)
    {
      auto& ACInfo = mybuilder->AtomicCentersInfo;
      const int my_GroupID = ACInfo.GroupID[center_idx];
      int found_idx = -1;
      for (size_t idx = 0; idx < uniq_species.size(); idx++)
        if (my_GroupID == uniq_species[idx])
        {
          found_idx = idx;
          break;
        }
      if (found_idx < 0)
        uniq_species.push_back(my_GroupID);
    }
    // construct group list: the centers belonging to each species
    std::vector<std::vector<int>> group_list(uniq_species.size());
    for (int center_idx = 0; center_idx < centers.size(); center_idx++)
    {
      auto& ACInfo = mybuilder->AtomicCentersInfo;
      const int my_GroupID = ACInfo.GroupID[center_idx];
      for (size_t idx = 0; idx < uniq_species.size(); idx++)
        if (my_GroupID == uniq_species[idx])
        {
          group_list[idx].push_back(center_idx);
          break;
        }
    }

    // process one species group at a time; all centers in a group share
    // spline_radius, spline_npoints and lmax (taken from the first member)
    for (int group_idx = 0; group_idx < group_list.size(); group_idx++)
    {
      const auto& mygroup = group_list[group_idx];
      const double spline_radius = centers[mygroup[0]].spline_radius;
      const int spline_npoints = centers[mygroup[0]].spline_npoints;
      const int lmax = centers[mygroup[0]].lmax;
      const double delta = spline_radius / static_cast<double>(spline_npoints - 1);
      const int lm_tot = (lmax + 1) * (lmax + 1);
      const size_t natoms = mygroup.size();
      // policy selects the thread-local buffer layout so the inner SIMD loop
      // runs over the larger dimension:
      //   policy 0 (lm_tot > natoms): buffers indexed [atom*2][lm], SIMD over lm
      //   policy 1 (natoms >= lm_tot): buffers indexed [lm*2][atom], SIMD over atoms
      const int policy = lm_tot > natoms ? 0 : 1;

      // i^l times the rotation phase, replicated across each l shell
      std::vector<std::complex<double>> i_power(lm_tot);
      // rotate phase is introduced here.
      std::complex<double> i_temp(rotate_phase_r, rotate_phase_i);
      for (size_t l = 0; l <= lmax; l++)
      {
        for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++)
          i_power[lm] = i_temp;
        i_temp *= std::complex<double>(0.0, 1.0);
      }

      // per-atom accumulation of f_lm(r): spline_npoints rows, real parts in
      // columns [0, lm_tot) and imaginary parts in [lm_tot, 2*lm_tot)
      std::vector<Matrix<double>> all_vals(natoms);
      // thread-local buffers, one slot per (thread, radial grid point)
      std::vector<std::vector<aligned_vector<double>>> vals_local(spline_npoints * omp_get_max_threads());
      VectorSoaContainer<double, 3> myRSoA(natoms);
      for (size_t idx = 0; idx < natoms; idx++)
      {
        all_vals[idx].resize(spline_npoints, lm_tot * 2);
        all_vals[idx] = 0.0;
        myRSoA(idx) = centers[mygroup[idx]].pos;
      }

#pragma omp parallel
      {
        const size_t tid = omp_get_thread_num();
        const size_t nt = omp_get_num_threads();

        // zero-initialize this thread's buffers in the layout chosen by policy
        for (int ip = 0; ip < spline_npoints; ip++)
        {
          const size_t ip_idx = tid * spline_npoints + ip;
          if (policy == 1)
          {
            vals_local[ip_idx].resize(lm_tot * 2);
            for (size_t lm = 0; lm < lm_tot * 2; lm++)
            {
              auto& vals = vals_local[ip_idx][lm];
              vals.resize(natoms);
              std::fill(vals.begin(), vals.end(), 0.0);
            }
          }
          else
          {
            vals_local[ip_idx].resize(natoms * 2);
            for (size_t iat = 0; iat < natoms * 2; iat++)
            {
              auto& vals = vals_local[ip_idx][iat];
              vals.resize(lm_tot);
              std::fill(vals.begin(), vals.end(), 0.0);
            }
          }
        }

        // process plane waves in tiles of 32 so the per-G phase shifts and
        // Ylm(G) can be computed once per tile and reused for every radial
        // grid point
        const size_t size_pw_tile = 32;
        const size_t num_pw_tiles = (Gvecs.NumGvecs + size_pw_tile - 1) / size_pw_tile;
        aligned_vector<double> j_lm_G(lm_tot, 0.0);
        std::vector<aligned_vector<double>> phase_shift_r(size_pw_tile);
        std::vector<aligned_vector<double>> phase_shift_i(size_pw_tile);
        std::vector<aligned_vector<double>> YlmG(size_pw_tile);
        for (size_t ig = 0; ig < size_pw_tile; ig++)
        {
          phase_shift_r[ig].resize(natoms);
          phase_shift_i[ig].resize(natoms);
          YlmG[ig].resize(lm_tot);
        }
        SoaSphericalTensor<double> Ylm(lmax);

#pragma omp for
        for (size_t tile_id = 0; tile_id < num_pw_tiles; tile_id++)
        {
          const size_t ig_first = tile_id * size_pw_tile;
          const size_t ig_last = std::min((tile_id + 1) * size_pw_tile, Gvecs.NumGvecs);
          for (size_t ig = ig_first; ig < ig_last; ig++)
          {
            const size_t ig_local = ig - ig_first;
            // calculate phase shift for all the centers of this group
            Gvecs.calc_phase_shift(myRSoA, ig, phase_shift_r[ig_local], phase_shift_i[ig_local]);
            Gvecs.calc_Ylm_G(ig, Ylm, YlmG[ig_local]);
          }

          for (int ip = 0; ip < spline_npoints; ip++)
          {
            double r = delta * static_cast<double>(ip);
            const size_t ip_idx = tid * spline_npoints + ip;

            for (size_t ig = ig_first; ig < ig_last; ig++)
            {
              const size_t ig_local = ig - ig_first;
              // calculate spherical bessel function
              Gvecs.calc_jlm_G(lmax, r, ig, j_lm_G);
              for (size_t lm = 0; lm < lm_tot; lm++)
                j_lm_G[lm] *= YlmG[ig_local][lm];

              const double cG_r = cG[ig + gvec_first].real();
              const double cG_i = cG[ig + gvec_first].imag();
              if (policy == 1)
              {
                // accumulate cG * j_l * Ylm * e^{iG.R} for each lm, SIMD over atoms
                for (size_t lm = 0; lm < lm_tot; lm++)
                {
                  double* restrict vals_r = vals_local[ip_idx][lm * 2].data();
                  double* restrict vals_i = vals_local[ip_idx][lm * 2 + 1].data();
                  const double* restrict ps_r_ptr = phase_shift_r[ig_local].data();
                  const double* restrict ps_i_ptr = phase_shift_i[ig_local].data();
                  double cG_j_r = cG_r * j_lm_G[lm];
                  double cG_j_i = cG_i * j_lm_G[lm];
#pragma omp simd aligned(vals_r, vals_i, ps_r_ptr, ps_i_ptr)
                  for (size_t idx = 0; idx < natoms; idx++)
                  {
                    const double ps_r = ps_r_ptr[idx];
                    const double ps_i = ps_i_ptr[idx];
                    vals_r[idx] += cG_j_r * ps_r - cG_j_i * ps_i;
                    vals_i[idx] += cG_j_i * ps_r + cG_j_r * ps_i;
                  }
                }
              }
              else
              {
                // accumulate per atom, SIMD over lm channels
                for (size_t idx = 0; idx < natoms; idx++)
                {
                  double* restrict vals_r = vals_local[ip_idx][idx * 2].data();
                  double* restrict vals_i = vals_local[ip_idx][idx * 2 + 1].data();
                  const double* restrict j_lm_G_ptr = j_lm_G.data();
                  double cG_ps_r = cG_r * phase_shift_r[ig_local][idx] - cG_i * phase_shift_i[ig_local][idx];
                  double cG_ps_i = cG_i * phase_shift_r[ig_local][idx] + cG_r * phase_shift_i[ig_local][idx];
#pragma omp simd aligned(vals_r, vals_i, j_lm_G_ptr)
                  for (size_t lm = 0; lm < lm_tot; lm++)
                  {
                    const double jlm = j_lm_G_ptr[lm];
                    vals_r[lm] += cG_ps_r * jlm;
                    vals_i[lm] += cG_ps_i * jlm;
                  }
                }
              }
            }
          }
        }

        // reduce the thread-local buffers into all_vals, applying the
        // 4*pi*i^l (phase-rotated) prefactor and converting to the
        // [real | imag] column layout of all_vals
#pragma omp for collapse(2)
        for (int ip = 0; ip < spline_npoints; ip++)
          for (size_t idx = 0; idx < natoms; idx++)
          {
            double* vals = all_vals[idx][ip];
            for (size_t tid = 0; tid < nt; tid++)
              for (size_t lm = 0; lm < lm_tot; lm++)
              {
                double vals_th_r, vals_th_i;
                const size_t ip_idx = tid * spline_npoints + ip;
                if (policy == 1)
                {
                  vals_th_r = vals_local[ip_idx][lm * 2][idx];
                  vals_th_i = vals_local[ip_idx][lm * 2 + 1][idx];
                }
                else
                {
                  vals_th_r = vals_local[ip_idx][idx * 2][lm];
                  vals_th_i = vals_local[ip_idx][idx * 2 + 1][lm];
                }
                const double real_tmp = 4.0 * M_PI * i_power[lm].real();
                const double imag_tmp = 4.0 * M_PI * i_power[lm].imag();
                vals[lm] += vals_th_r * real_tmp - vals_th_i * imag_tmp;
                vals[lm + lm_tot] += vals_th_i * real_tmp + vals_th_r * imag_tmp;
              }
          }
      }
      //app_log() << "Building band " << iorb << " at center " << center_idx << std::endl;

      for (size_t idx = 0; idx < natoms; idx++)
      {
        // reduce all_vals
        band_group_comm.reduce_in_place(all_vals[idx].data(), all_vals[idx].size());
        if (!band_group_comm.isGroupLeader())
          continue;
        // fit a 1D spline per lm channel; the last argument controls the
        // boundary-condition flag passed to einspline::create
#pragma omp parallel for
        for (int lm = 0; lm < lm_tot; lm++)
        {
          auto& mycenter = centers[mygroup[idx]];
          aligned_vector<double> splineData_r(spline_npoints);
          UBspline_1d_d* atomic_spline_r;
          for (size_t ip = 0; ip < spline_npoints; ip++)
            splineData_r[ip] = all_vals[idx][ip][lm];
          atomic_spline_r = einspline::create(atomic_spline_r, 0.0, spline_radius, spline_npoints, splineData_r.data(),
                                              ((lm == 0) || (lm > 3)));
          if (!bspline->is_complex)
          {
            mycenter.set_spline(atomic_spline_r, lm, iorb);
            einspline::destroy(atomic_spline_r);
          }
          else
          {
            // complex case: fit the imaginary part too and store the pair
            // at consecutive orbital slots (2*iorb, 2*iorb+1)
            aligned_vector<double> splineData_i(spline_npoints);
            UBspline_1d_d* atomic_spline_i;
            for (size_t ip = 0; ip < spline_npoints; ip++)
              splineData_i[ip] = all_vals[idx][ip][lm + lm_tot];
            atomic_spline_i = einspline::create(atomic_spline_i, 0.0, spline_radius, spline_npoints,
                                                splineData_i.data(), ((lm == 0) || (lm > 3)));
            mycenter.set_spline(atomic_spline_r, lm, iorb * 2);
            mycenter.set_spline(atomic_spline_i, lm, iorb * 2 + 1);
            einspline::destroy(atomic_spline_r);
            einspline::destroy(atomic_spline_i);
          }
        }
      }

      // NOTE(review): this PRINT_RADIAL debug block references `center_idx`
      // and `mycenter`, which are not in scope at this point in the current
      // code; it appears stale and would not compile if PRINT_RADIAL were
      // defined. Verify before enabling.
#ifdef PRINT_RADIAL
      char fname[64];
      sprintf(fname, "band_%d_center_%d_pw.dat", iorb, center_idx);
      FILE* fout_pw = fopen(fname, "w");
      sprintf(fname, "band_%d_center_%d_spline_v.dat", iorb, center_idx);
      FILE* fout_spline_v = fopen(fname, "w");
      sprintf(fname, "band_%d_center_%d_spline_g.dat", iorb, center_idx);
      FILE* fout_spline_g = fopen(fname, "w");
      sprintf(fname, "band_%d_center_%d_spline_l.dat", iorb, center_idx);
      FILE* fout_spline_l = fopen(fname, "w");
      fprintf(fout_pw, "# r vals(lm)\n");
      fprintf(fout_spline_v, "# r vals(lm)\n");
      fprintf(fout_spline_g, "# r grads(lm)\n");
      fprintf(fout_spline_l, "# r lapls(lm)\n");
      // write to file for plotting
      for (int ip = 0; ip < spline_npoints - 1; ip++)
      {
        double r = delta * static_cast<double>(ip);
        mycenter.SplineInst->evaluate_vgl(r, mycenter.localV, mycenter.localG, mycenter.localL);
        fprintf(fout_pw, "%15.10lf ", r);
        fprintf(fout_spline_v, "%15.10lf ", r);
        fprintf(fout_spline_g, "%15.10lf ", r);
        fprintf(fout_spline_l, "%15.10lf ", r);
        for (int lm = 0; lm < lm_tot; lm++)
        {
          fprintf(fout_pw, "%15.10lf %15.10lf ", all_vals[center_idx][ip][lm].real(),
                  all_vals[center_idx][ip][lm].imag());
          fprintf(fout_spline_v, "%15.10lf %15.10lf ", mycenter.localV[lm * mycenter.Npad + iorb * 2],
                  mycenter.localV[lm * mycenter.Npad + iorb * 2 + 1]);
          fprintf(fout_spline_g, "%15.10lf %15.10lf ", mycenter.localG[lm * mycenter.Npad + iorb * 2],
                  mycenter.localG[lm * mycenter.Npad + iorb * 2 + 1]);
          fprintf(fout_spline_l, "%15.10lf %15.10lf ", mycenter.localL[lm * mycenter.Npad + iorb * 2],
                  mycenter.localL[lm * mycenter.Npad + iorb * 2 + 1]);
        }
        fprintf(fout_pw, "\n");
        fprintf(fout_spline_v, "\n");
        fprintf(fout_spline_g, "\n");
        fprintf(fout_spline_l, "\n");
      }
      fclose(fout_pw);
      fclose(fout_spline_v);
      fclose(fout_spline_g);
      fclose(fout_spline_l);
#endif
    }
  }
};
} // namespace qmcplusplus
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /* Zero-filled, aligned allocation; allocation failure here is fatal. */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no backing file descriptor yet */
  cache_info->id=GetMagickThreadId();
  /*
    Size the nexus pool: start from the caller's request, grow it to cover
    the OpenMP maximum and the thread resource limit, and never allow zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The synchronize flag is first read from the environment and then, if a
    security policy defines it, overridden by the policy value.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  /* Clamp width/height limits so extents always fit in a signed ssize_t. */
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  ssize_t
    i;

  /*
    Allocate a table of 2*number_threads pointers plus one contiguous array
    of 2*number_threads NexusInfo structures.  Slots [0,number_threads) are
    the per-thread nexuses; slots [number_threads,2*number_threads) act as
    their companion virtual nexuses, linked below.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
    2*sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    /* Each table entry indexes into the single backing array. */
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Validate arguments and resolve the image's pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  /* Only in-core caches (heap or memory-mapped) expose their pixels. */
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Create the global cache semaphore on first use; subsequent calls are
    no-ops.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    Ensure the semaphore exists before relinquishing it; terminus may run
    without CacheComponentGenesis() ever having been called.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to do unless the image defines a write-mask channel. */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  /* An empty nexus region is trivially clipped. */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the committed (authentic) pixels for the region, fetched through
    the companion virtual nexus; q walks the freshly written nexus pixels.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /*
            Composite the committed pixel over the new value for every
            channel carrying the update trait, weighted by mask alpha.
          */
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      /* Advance both walkers one full pixel (all channels). */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  /*
    Build a fresh cache with the same nexus pool size and carry over the
    virtual pixel method; the pixels themselves are not copied here.
  */
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  /*
    Copy the method table of `cache` into `clone`.
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* Rewind both files before copying. */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /*
        Fast path: single in-kernel copy.  sendfile(2) transfers at most
        0x7ffff000 bytes per call; on a short transfer, rewind and fall back
        to the read/write loop below.
      */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* Portable path: copy quantum-sized chunks until EOF or a short write. */
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* Succeed only if every byte of the source cache reached the clone. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* A ping cache carries no pixel data; nothing to clone. */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* Both caches addressable in memory: bulk memcpy of pixels and
             (when both sides have it) metacontent. */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through per-thread
    nexus staging buffers.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* Rows can be block-copied when the channel layouts agree exactly. */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    ssize_t
      x;

    /* A failure in any row aborts the remaining iterations. */
    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* Stage one source row in this thread's nexus... */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* ...then prepare the destination row. */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            /* Pull each clone channel from its offset in the source map;
               channels the source lacks stay zero from the memset above. */
            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache reference, if any.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  DestroyPixelHandler
    destroy_handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered handler when one is installed; otherwise drop
     the cache reference directly. */
  destroy_handler=cache_info->methods.destroy_pixel_handler;
  if (destroy_handler != (DestroyPixelHandler) NULL)
    destroy_handler(image);
  else
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the cache's backing file and release its file-resource slot.
    Returns MagickFalse when no file is open or close(2) fails.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel storage appropriate to the cache type, then reset the
    cache to an undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* OpenCL-backed memory is relinquished through the CL cache info. */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* Heap-allocated unless the pixels came from an anonymous mapping. */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above -- MapCache falls through into DiskCache
       so the backing file also gets closed; looks deliberate, confirm. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the lock; only the final holder tears the
    cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Release pixel storage first, then the auxiliary structures, and
     finally the CacheInfo itself (after poisoning its signature). */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Release the pixel storage owned by a cache nexus and reset its bookkeeping
  fields; memory-mapped storage is unmapped, heap storage is freed.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    i;

  /*
    Release the pixels of each nexus (2*number_threads of them are kept),
    then free the nexus structures: *nexus_info anchors one contiguous
    allocation for the structs, nexus_info is the aligned pointer array.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (i=(ssize_t) (2*number_threads)-1; i >= 0; i--)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent staged by the last authentic-pixel request; an
    installed cache-method handler takes precedence over the per-thread nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return (with an added reference) an OpenCL buffer wrapping the image's
    in-memory pixel cache, acquiring a MagickCLCacheInfo on first use.
    Returns NULL when the cache cannot be exposed as a CL buffer.
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* make the cache usable/unshared before wrapping it; SyncImagePixelCache
     may replace image->cache, so re-read it afterwards */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* only a heap-backed (non-mapped) memory cache qualifies */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* NOTE(review): an existing CL cache info bound to a different device
     context is replaced via CopyMagickCLCacheInfo — presumably that retargets
     the context; confirm against the OpenCL private API */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* add a reference for the caller while still under the semaphore */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict q;

  /*
    Stage the requested region in the nexus, then populate it from the
    backing store unless the nexus already aliases the authentic pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  q=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (q == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(q);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(q);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the writable pixels staged by the most recent authentic request,
    delegating to an installed cache handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region: an installed handler wins, otherwise
    the region is staged in the calling thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default cache method: stage the requested region in the calling thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  /*
    Extent of the pixels staged by the last authentic/virtual pixel request
    in this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?  Each mismatch below
    (checked in the same order as the original short-circuit chain) means
    the cache must be re-opened.
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != cache_info->alpha_trait)
    return(MagickFalse);
  if (image->channels != cache_info->channels)
    return(MagickFalse);
  if (image->columns != cache_info->columns)
    return(MagickFalse);
  if (image->rows != cache_info->rows)
    return(MagickFalse);
  if (image->number_channels != cache_info->number_channels)
    return(MagickFalse);
  if (memcmp(p,q,image->number_channels*sizeof(*p)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* process-wide state: throttle/time limits are read once, cycles counts
     calls so the throttle delay fires every 32nd invocation */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  /*
    Ensure this image owns the only reference to its pixel cache, replacing
    image->cache with a clone when the cache is shared or opened read-only.
    Returns NULL on failure.
  */
  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  /* fatal (non-returning) exception once the time resource is exhausted;
     close any disk-backed cache file first */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* double-checked under the cache semaphore: clone only if still shared
     or read-only once the lock is held */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          /* stack copy of the image so OpenPixelCache operates on the new
             cache without touching the caller's image yet */
          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* copy the pixel data too only when the caller requested it */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* drop our reference to the superseded cache outside its semaphore */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict info;

  /*
    Report the backing-store type of the image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    i;

  /*
    Copy one pixel from source to destination in channel-map order; a NULL
    source substitutes the image background color and returns MagickFalse.
  */
  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    destination[GetPixelChannelChannel(image,i)]=source[i];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict q;

  /*
    Fetch one authentic pixel, preferring an installed cache handler; pixel
    is zeroed first so unfilled channels stay defined.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict p;

  /*
    Default handler: read one authentic pixel through this thread's cache
    nexus; pixel is zeroed first so unfilled channels stay defined.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *q;

  /*
    Fetch one virtual pixel, preferring an installed cache handler; pixel is
    zeroed first so unfilled channels stay defined.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  q=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *q;

  /*
    Default handler: fetch one virtual pixel via this thread's cache nexus;
    pixel is zeroed first so unfilled channels stay defined.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict q;

  /*
    Fetch one virtual pixel as a PixelInfo; pixel is initialized first so a
    failed fetch still leaves it well-defined.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (q != (const Quantum *) NULL)
    {
      GetPixelInfoPixel(image,q,pixel);
      return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the colorspace recorded in the pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Install the default pixel-cache handler table.  Fields not explicitly
    assigned below remain NULL from the memset.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* Virtual (read-only) accessors. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* Authentic (read/write) accessors. */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /* Queue, synchronize, and tear-down handlers. */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    area;

  /*
    Extent (in pixels) of the nexus region; falls back to the whole cache
    when the region is empty.
  */
  assert(cache != NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  area=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  return(area == 0 ? (MagickSizeType) cache_info->columns*cache_info->rows :
    area);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *magick_unused(exception))
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the raw pixel buffer (memory- or map-backed caches only); the
    cache length in bytes is always reported through *length.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  magick_unreferenced(exception);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the storage class (DirectClass/PseudoClass) of the pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  /*
    Square tile whose row fits a fixed byte budget: 2048 bytes for in-core
    caches, 8192 bytes for disk-backed caches.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  *width=(GetImagePixelCacheType(image) == DiskCache ? 8192UL : 2048UL)/extent;
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report how out-of-bounds ("virtual") pixel requests are satisfied.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Meta-content associated with this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Meta-content of the given nexus; NULL when the cache is uninitialized.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table (values 0..63).  DitherX()/DitherY() index
  it with (coordinate & 0x07) and subtract 32 to produce a signed jitter for
  out-of-bounds pixel lookups.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
ssize_t
index;
index=x+DitherMatrix[x & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) columns)
return((ssize_t) columns-1L);
return(index);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
ssize_t
index;
index=y+DitherMatrix[y & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) rows)
return((ssize_t) rows-1L);
return(index);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  ssize_t
    clamped;

  /* Clamp x to the valid column range [0,columns-1]. */
  clamped=x;
  if (clamped < 0L)
    clamped=0L;
  if (clamped >= (ssize_t) columns)
    clamped=(ssize_t) (columns-1);
  return(clamped);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  ssize_t
    clamped;

  /* Clamp y to the valid row range [0,rows-1]. */
  clamped=y;
  if (clamped < 0L)
    clamped=0L;
  if (clamped >= (ssize_t) rows)
    clamped=(ssize_t) (rows-1);
  return(clamped);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  /* Uniformly random column index scaled from a pseudo-random value. */
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  /* Uniformly random row index scaled from a pseudo-random value. */
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division of offset by extent: quotient selects the tile and
    remainder is the coordinate within it, with remainder always in
    [0,extent) even for negative offsets (C's % truncates toward zero, so
    the sign-mismatch case is adjusted below).
  */
  modulo.quotient=offset;
  modulo.remainder=0;
  if (extent != 0)
    {
      /*
        Guard BOTH operations: the original computed offset % extent
        unconditionally, which raises SIGFPE (division by zero) when
        extent == 0 even though the quotient was guarded.
      */
      modulo.quotient=offset/((ssize_t) extent);
      modulo.remainder=offset % ((ssize_t) extent);
    }
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Pre-compute the constant fill pixel these methods return for
        out-of-bounds coordinates.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* BackgroundVirtualPixelMethod and friends: image background. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tiles are reflected in the corresponding axis. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant fill computed before the transfer loop. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            FIX: copy metacontent for the whole run -- metacontent_extent
            bytes per pixel, matching the pointer advance below.  The
            original copied only `length' bytes, leaving the tail of the
            run's metacontent stale when metacontent_extent > 1.
          */
          (void) memcpy(s,r,(size_t) (length*cache_info->metacontent_extent));
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Read-only pixel region resolved through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelHandler
    handler;

  /*
    Immutable pixel region: dispatch to the installed handler when present,
    otherwise resolve through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,
    y,columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t V i r t u a l P i x e l s C a c h e                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated corresponding with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
%      const Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the most recently selected virtual pixel region for the calling
    thread, read from its per-thread cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels held by the given cache nexus, or NULL when the cache
    has not been initialized (undefined storage class).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Blend pixel p over q weighted by the mask strengths alpha and beta.
    A fully transparent mask lets the original pixel q show through.
  */
  if (fabs((double) (alpha-TransparentAlpha)) < MagickEpsilon)
    return(q);
  gamma=PerceptibleReciprocal(1.0-QuantumScale*QuantumScale*alpha*beta);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* image has no composite mask: nothing to blend */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region: nothing to do */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p: the authentic (pre-update) pixels for the region, fetched through the
    nexus' virtual nexus; q: the nexus' working pixels, blended in place.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        alpha;

      ssize_t
        i;

      /* mask strength at this pixel */
      alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* only channels flagged for update participate in the blend */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(q[i],alpha,p[i],GetPixelAlpha(image,p));
      }
      /* advance both pointers one full pixel (all channels) */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open (or create) the disk file backing the pixel cache.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    if (mode == ReadMode)
      file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
    else
      {
        int
          rw_flag;

        /*
          WriteMode gets a write-only descriptor; IOMode (and any other
          mode) gets read/write.  Attempt exclusive creation first, then
          fall back to opening a pre-existing cache file.
        */
        rw_flag=(mode == WriteMode) ? O_WRONLY : O_RDWR;
        file=open_utf8(cache_info->cache_filename,rw_flag | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,rw_flag | O_BINARY,S_MODE);
      }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* no pwrite(): emulate a positioned write with an explicit seek */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  /*
    Write in chunks, retrying on EINTR; i accumulates the bytes actually
    written and is returned (it may be short of length on a hard error).
  */
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* unrecoverable write error */
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Grow the disk-backed pixel cache file to at least `length' bytes.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that would overflow a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      /*
        Extend the file by writing a single byte at the last position;
        optionally pre-allocate the intervening blocks so a later short
        write cannot fail with ENOSPC.
      */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* rewind so subsequent reads/writes start at the beginning */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  /*
    Allocate the pixel cache, trying in order: heap or anonymous-mapped
    memory, a distributed cache server, a file-backed memory map, and
    finally a plain disk file.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Keep a copy of the previous cache so its pixels can be cloned into the
    new storage and then released.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* detect arithmetic overflow in the cache length computation */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: metadata only, no pixel storage is allocated */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always live on disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* policy requires an anonymous memory mapping */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: fall back to the previous pixels */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* carry the old pixels over, then free them */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* no distributed server available: out of cache resources */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* discard the stale disk file so a fresh one is created */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->type=DiskCache;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length == (MagickSizeType) ((size_t) length))
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* mmap failed: fall back to plain disk I/O */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /*
    Last resort: a plain (unmapped) disk cache.
  */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent pixel cache.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* flush any device-side pixels back to host memory first */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance the caller's offset to the next page-aligned cache slot */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  /* copy the live pixels into the persistent on-disk cache */
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* advance the caller's offset to the next page-aligned cache slot */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* the region's origin must lie inside the cache */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* offset arithmetic overflowed */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* the region's far corner must also lie inside the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a writable pixel region through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region, delegating to a registered handler when
    the cache methods have been overridden.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* no pread(): emulate a positioned read with an explicit seek */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  /*
    Read in chunks, retrying on EINTR; i accumulates the bytes actually read
    and is returned (it may be short of length on EOF or a hard error).
  */
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* EOF or unrecoverable read error */
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  unsigned char
    *magick_restrict q;

  size_t
    rows;

  /*
    Read metacontent for the nexus region from the pixel cache backing store
    (memory, disk, or distributed server) into nexus_info->metacontent.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; nothing to read */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory; collapse to a single transfer when the
        region spans full cache rows and the extent fits in size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent follows the pixel data; extent is repurposed
        here as the total pixel count used to skip past it.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fix: "&region" was corrupted to "(R)ion" by entity mangling */
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y falls short of rows only when a transfer above failed.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Read pixels for the nexus region from the pixel cache backing store
    (memory, disk, or distributed server) into nexus_info->pixels.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; nothing to read */
  /*
    Compute the cache offset and per-row byte length, guarding each
    multiplication against overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory; collapse to a single transfer when the
        region spans full cache rows and the extent fits in size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fix: "&region" was corrupted to "(R)ion" by entity mangling */
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y falls short of rows only when a transfer above failed.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the pixel cache reference count under its semaphore and return
    the same cache handle.
  */
  assert(cache != (Cache) NULL);  /* was (Cache *) NULL: wrong pointer type */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Refresh the cached channel count from the image's current pixel channels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Reset the module-global anonymous-memory policy value to 0.
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the module-global pixel cache epoch counter to 0.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each non-NULL handler supplied in cache_methods
    replaces the corresponding handler in cache_info->methods; NULL entries
    leave the existing handler untouched.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): unlike every guard above, this one inspects the handler
    currently installed in cache_info->methods rather than the one supplied in
    cache_methods, so a non-NULL existing handler can be overwritten by a NULL
    replacement -- presumably intentional; confirm before changing.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate staging storage for a cache nexus: an anonymous mapping when the
    cache_anonymous_memory policy requests it, otherwise zeroed aligned heap
    memory.  On success nexus_info->cache/length/mapped are set.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Prefetch the next cache line of the nexus pixels; skip tiny buffers.  The
    read/write argument stays a literal constant in each call.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode != ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        1,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
    0,1);
}
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  /*
    Return MagickTrue only when combining offset x with extent a cannot leave
    the representable ssize_t range in either direction.
  */
  if (((x < 0) || (x < ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a))) &&
      (x > ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a)))
    return(MagickTrue);
  return(MagickFalse);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Bind the cache nexus to the region (x,y,width,height), either aliasing
    the in-memory cache directly or allocating staging storage.  Returns the
    nexus pixel pointer, or NULL on error.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  /*
    An empty region defines no pixels.
  */
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  /*
    Reject dimensions above the configured limits and offsets whose
    arithmetic with width/height would overflow ssize_t.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  /*
    Direct access is possible only for in-core caches when the region lies
    inside the cache and is either full-width rows or a single partial row.
  */
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  /*
    Reuse the existing staging buffer when it is large enough; otherwise
    release it and allocate a bigger one.
  */
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  /* metacontent, when present, follows the pixel data in the staging buffer */
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Mark the image as blended and set every pixel's alpha channel to the
    given value, one row at a time (parallelized when OpenMP is available).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Install the new virtual pixel method and return the previous one.  Some
    methods additionally require an alpha channel (and possibly an sRGB
    colorspace) to be in place, handled in the switch below.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* an alpha-bearing background color needs the image to have alpha */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* a non-gray background cannot be represented in a gray colorspace */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Copy the OpenCL cache info for an in-memory cache that has one; the
    semaphore serializes access to the OpenCL environment.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type == MemoryCache) &&
      (cache_info->opencl != (MagickCLCacheInfo) NULL))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Forward the image's pixel cache to CopyOpenCLBuffer().
  */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /*
    Apply any write/composite mask before committing the pixels.
  */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /*
    The nexus already aliases the cache; only the taint flag needs updating.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Write pixels, then metacontent when present; a metacontent failure is
    reported even if the pixel write succeeded.
  */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync this thread's cache nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Delegate to a registered sync handler when one is installed; otherwise
    sync this thread's cache nexus directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Re-acquire the image pixel cache; success is indicated by a non-NULL
    cache handle.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Write the meta-content of the given nexus back to the pixel cache.
  Returns MagickTrue on success; MagickFalse when the image has no
  metacontent or a write fails.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /*
    Nothing to write when the image carries no metacontent; a nexus mapped
    directly onto the authentic cache is already in sync.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Region spans whole rows: collapse into a single copy.
          */
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        Metacontent is stored on disk after the pixel data; reuse extent as
        the pixel-area term of that base offset.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" had been mangled into the mojibake "(R)ion" */
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y stops short of rows only when a write above failed.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Write the pixels of the given nexus back to the specified region of the
  pixel cache.  Returns MagickTrue on success, MagickFalse if any write
  fails.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /*
    A nexus mapped directly onto the authentic cache is already in sync.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Region spans whole rows: collapse into a single copy.
          */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" had been mangled into the mojibake "(R)ion" */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y stops short of rows only when a write above failed.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
is.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char class, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
typedef int INT_TYPE;
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[SIZE_OF_BUFFERS],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE];
#ifdef USE_BUCKETS
INT_TYPE bucket_size[NUM_BUCKETS],
bucket_ptrs[NUM_BUCKETS];
#endif
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895};
/***********************/
/* function prototypes */
/***********************/
double randlc( double *X, double *A );
void full_verify( void );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/*
 * randlc: portable linear congruential pseudorandom number generator,
 *   x_{k+1} = a * x_k (mod 2^46),
 * implemented in double precision (requires >= 48 mantissa bits).
 *
 * X: in/out seed, an odd integer in (1, 2^46); updated to the new seed.
 * A: the multiplier 'a', an odd integer in (1, 2^46).
 * Returns 2^-46 * x_{k+1}, a value in (0, 1).
 *
 * Note: the original K&R-style definition was modernized to an ANSI
 * prototype (matching the forward declaration); the arithmetic is
 * unchanged.
 */
double randlc( double *X, double *A )
{
    static int    KS = 0;             /* one-time initialization flag */
    static double R23, R46, T23, T46; /* 2^-23, 2^-46, 2^23, 2^46 */
    double T1, T2, T3, T4;
    double A1, A2;
    double X1, X2;
    double Z;
    int i, j;

    if (KS == 0)
    {
        /* Compute the powers of two in loops so the results are exact on
           any system where 0.5 is represented exactly. */
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;
        for (i=1; i<=23; i++)
        {
            R23 = 0.50 * R23;
            T23 = 2.0 * T23;
        }
        for (i=1; i<=46; i++)
        {
            R46 = 0.50 * R46;
            T46 = 2.0 * T46;
        }
        KS = 1;
    }

/*  Break A into two parts such that A = 2^23 * A1 + A2. */
    T1 = R23 * *A;
    j  = T1;
    A1 = j;
    A2 = *A - T23 * A1;

/*  Break X into two parts such that X = 2^23 * X1 + X2, compute
    Z = A1 * X2 + A2 * X1  (mod 2^23), and then
    X = 2^23 * Z + A2 * X2  (mod 2^46). */
    T1 = R23 * *X;
    j  = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;
    j  = R23 * T1;
    T2 = j;
    Z  = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j  = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;
    return (R46 * *X);
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/*
 * create_seq: fill key_array with NUM_KEYS pseudorandom keys in
 * [0, MAX_KEY).  Each key is the sum of four consecutive randlc draws
 * (an approximately bell-shaped distribution) scaled by MAX_KEY/4.
 *
 * seed: initial seed passed to randlc (advanced as a local copy).
 * a:    randlc multiplier.
 *
 * (Removed the unused local variable 'j' from the original.)
 */
void create_seq( double seed, double a )
{
    double x;
    int i, k;

    k = MAX_KEY/4;

    for (i=0; i<NUM_KEYS; i++)
    {
        x = randlc(&seed, &a);
        x += randlc(&seed, &a);
        x += randlc(&seed, &a);
        x += randlc(&seed, &a);

        key_array[i] = k*x;
    }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/*
 * full_verify: place every key into its final sorted position using the
 * rank information left in key_buff_ptr_global by the last call to rank(),
 * then scan the result and count out-of-order adjacent pairs.  Increments
 * the global passed_verification counter on success, otherwise prints the
 * number of misplaced keys.
 *
 * (Removed the unused locals 'k', 'm' and 'unique_keys' from the
 * original.)
 */
void full_verify()
{
    INT_TYPE    i, j;

/*  Now, finally, sort the keys:  */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

/*  Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %d\n",
                j );
    }
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/*
 * rank: count the population of every key value and prefix-sum it so that
 * key_buff1[k] ends up holding the rank of key value k (number of keys
 * less than or equal to k).  Called from inside an OpenMP parallel region;
 * each thread accumulates into a private histogram and merges it into the
 * shared key_buff1 under a critical section.  'iteration' is the current
 * benchmark iteration; it perturbs two keys and shifts the expected
 * partial-verification ranks.
 */
void rank( int iteration )
{

    INT_TYPE    i, j, k;
    /* NOTE(review): l, m, shift, key, min_key_val and max_key_val are
       unused in this bucket-free OpenMP version; kept from the original. */
    INT_TYPE    l, m;

    INT_TYPE    shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2;
    INT_TYPE    key;
    INT_TYPE    min_key_val, max_key_val;

    /* Per-thread private histogram of key populations. */
    INT_TYPE    prv_buff1[MAX_KEY];

/* One thread perturbs two keys, records the partial-verification values
   and clears the shared histogram. */
#pragma omp master
  {
    key_array[iteration] = iteration;
    key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

/*  Determine where the partial verify test keys are, load into  */
/*  top of array bucket_size                                     */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];

/*  Clear the work array */
    for( i=0; i<MAX_KEY; i++ )
        key_buff1[i] = 0;
  }
/* All threads must see the cleared key_buff1 before accumulating. */
#pragma omp barrier

    for (i=0; i<MAX_KEY; i++)
        prv_buff1[i] = 0;

/*  Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
    for( i=0; i<NUM_KEYS; i++ ) {
        key_buff2[i] = key_array[i];

/*  Ranking of all keys occurs in this section:                 */

/*  In this section, the keys themselves are used as their
    own indexes to determine how many of each there are: their
    individual population                                       */
        prv_buff1[key_buff2[i]]++;  /* Now they have individual key   */
    }
                                    /* population                     */
    /* Prefix-sum this thread's histogram into cumulative counts. */
    for( i=0; i<MAX_KEY-1; i++ )
        prv_buff1[i+1] += prv_buff1[i];

/* Merge the thread-private partial ranks into the shared array. */
#pragma omp critical
    {
        for( i=0; i<MAX_KEY; i++ )
            key_buff1[i] += prv_buff1[i];
    }

/*  To obtain ranks of each key, successively add the individual key
    population, not forgetting to add m, the total of lesser keys,
    to the first key population                                          */

/* Ranks are complete only after every thread has merged; then one
   thread runs the partial verification. */
#pragma omp barrier
#pragma omp master
  {

/* This is the partial verify test section */
/* Observe that test_rank_array vals are   */
/* shifted differently for different cases */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i];          /* test vals were put here */
        if( 0 <= k && k <= NUM_KEYS-1 )
            switch( CLASS )
            {
                case 'S':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if( i < 2 )
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]+(iteration-2) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]+(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] !=
                            test_rank_array[i]-(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if( i == 1 || i == 2 || i == 4 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                    "iteration %d, test key %d\n",
                                    iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
            }
    }

/*  Make copies of rank info for use by full_verify: these variables
    in rank are local; making them global slows down the code, probably
    since they cannot be made register by compiler                        */

    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff1;

  } /* end master */
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/*
 * Program entry point: runs the IS (Integer Sort) benchmark — generates
 * the key sequence, performs MAX_ITERATIONS timed ranking iterations in
 * parallel, verifies the result, and prints the standard NPB report.
 *
 * Modernized from the original implicit-int K&R definition; command-line
 * arguments are accepted but unused.  Also removed the unused locals
 * 'itemp' and 'maxtime' and added an explicit return value.
 */
int main( int argc, char **argv )
{
    int             i, iteration;
    int             nthreads = 1;
    double          timecounter;

    (void) argc;  /* arguments are not used by this benchmark */
    (void) argv;

/*  Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
            case 'S':
                test_index_array[i] = S_test_index_array[i];
                test_rank_array[i]  = S_test_rank_array[i];
                break;
            case 'A':
                test_index_array[i] = A_test_index_array[i];
                test_rank_array[i]  = A_test_rank_array[i];
                break;
            case 'W':
                test_index_array[i] = W_test_index_array[i];
                test_rank_array[i]  = W_test_rank_array[i];
                break;
            case 'B':
                test_index_array[i] = B_test_index_array[i];
                test_rank_array[i]  = B_test_rank_array[i];
                break;
            case 'C':
                test_index_array[i] = C_test_index_array[i];
                test_rank_array[i]  = C_test_rank_array[i];
                break;
        }

/*  Printout initial NPB info */
    printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
            " - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );

/*  Initialize timer */
    timer_clear( 0 );

/*  Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00,     /* Random number gen seed */
                1220703125.00 );  /* Random number gen mult */

/*  Do one iteration for free (i.e., untimed) to guarantee initialization of
    all data and code pages and respective tables */
#pragma omp parallel
    rank( 1 );

/*  Start verification counter */
    passed_verification = 0;

    if( CLASS != 'S' ) printf( "\n iteration\n" );

/*  Start timer */
    timer_start( 0 );

/*  This is the main iteration */
#pragma omp parallel private(iteration)
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
#pragma omp master
        if( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    }

/*  End of timing */
    timer_stop( 0 );
    timecounter = timer_read( 0 );

/*  This tests that keys are in sequence: sorting of last ranked key seq
    occurs here, but is an untimed operation */
    full_verify();

/*  The final printout: one partial verification per test key per
    iteration, plus one full verification, must all have passed. */
    if( passed_verification != 5*MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS",
                     CLASS,
                     TOTAL_KEYS,
                     0,
                     0,
                     MAX_ITERATIONS,
                     nthreads,
                     timecounter,
                     ((double) (MAX_ITERATIONS*TOTAL_KEYS))
                     /timecounter/1000000.,
                     "keys ranked",
                     passed_verification,
                     NPBVERSION,
                     COMPILETIME,
                     CC,
                     CLINK,
                     C_LIB,
                     C_INC,
                     CFLAGS,
                     CLINKFLAGS,
                     "randlc");

    return 0;
}
|
dataset.h | #ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training,
* e.g. labels, weights, initial scores, query level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
*    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist);
*    the weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If existing, the model will boost from this score, otherwise it will start from 0.
*/
class Metadata {
public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level information, since it is needed for sampling data
  * \param data_filename Filename of data
  */
  void Init(const char* data_filename);
  /*!
  * \brief Init as a subset of another metadata object
  * \param metadata Metadata to take the subset from
  * \param used_indices Indices of the used records
  * \param num_used_indices Number of used records
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize from binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight (if exists) and query (if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exist
  * \param query_idx Index of query id column, < 0 means doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of locally used records
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data,
    const std::vector<data_size_t>& used_data_indices);
  /*! \brief Set labels for all records */
  void SetLabel(const float* label, data_size_t len);
  /*! \brief Set weights for all records */
  void SetWeights(const float* weights, data_size_t len);
  /*! \brief Set query ids for all records */
  void SetQuery(const data_size_t* query, data_size_t len);
  /*!
  * \brief Set initial scores
  * \param init_score Initial scores, this class will manage memory for init_score.
  * \param len Number of initial scores
  */
  void SetInitScore(const double* init_score, data_size_t len);
  /*!
  * \brief Save binary data to file
  * \param file File want to write
  */
  void SaveBinaryToFile(FILE* file) const;
  /*!
  * \brief Get sizes in byte of this object
  */
  size_t SizesInByte() const;
  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const float* label() const { return label_.data(); }
  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, float value)
  {
    label_[idx] = value;
  }
  /*!
  * \brief Set weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, float value)
  {
    weights_[idx] = value;
  }
  /*!
  * \brief Set query id for one record
  * \param idx Index of this record
  * \param value Query id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value)
  {
    queries_[idx] = static_cast<data_size_t>(value);
  }
  /*!
  * \brief Get weights, if not exists, will return nullptr
  * \return Pointer of weights
  */
  inline const float* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get data boundaries on queries, if not exists, will return nullptr
  * we assume data is ordered by query,
  * the interval of [query_boundaries[i], query_boundaries[i+1])
  * is the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }
  /*!
  * \brief Get weights for queries, if not exists, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const float* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get initial scores, if not exists, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }
  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }
  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;
private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore();
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  const char* data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<float> label_;
  /*! \brief Weights data */
  std::vector<float> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<float> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct init-score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  /*! \brief Whether weights were loaded from file */
  bool weight_load_from_file_;
  /*! \brief Whether queries were loaded from file */
  bool query_load_from_file_;
  /*! \brief Whether initial scores were loaded from file */
  bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
  /*! \brief Virtual destructor */
  virtual ~Parser() {}
  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will store to this if exists
  */
  virtual void ParseOneLine(const char* str,
    std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
  /*!
  * \brief Create an object of parser, will automatically choose the format depending on the file
  * \param filename Filename of data
  * \param has_header Whether the data file has a header line
  * \param num_features Pass num_features of this data file if you know, <=0 means don't know
  * \param label_idx Index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool has_header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
* which are used to traning or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
void Construct(
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
int** sample_non_zero_indices,
const int* num_per_col,
size_t total_sample_cnt,
const IOConfig& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
  /*! \brief Push a single value directly into a known (group, sub_feature) slot */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }
  /*! \brief Map inner feature index to the original column index of the data */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }
  /*! \brief Map original column index to inner feature index (< 0 if the column is unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  /*! \brief Feature group that contains the given inner feature */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! \brief Sub-feature index of the given inner feature within its group
  *  (name keeps the historical "Feture" spelling for API compatibility) */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  /*! \brief Cumulative bin boundary (first bin index) of the given group */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  /*! \brief Total number of bins over all feature groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }
  // Resizes the dataset to hold num_data rows.
  void ReSize(data_size_t num_data);
  // Copies the rows listed in used_indices from fullset into this dataset.
  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
  // Marks loading as complete (see is_finish_load_).
  LIGHTGBM_EXPORT void FinishLoad();
  // Field setters/getters keyed by name; return false on unknown field.
  // NOTE(review): return semantics assumed from the bool signature — confirm.
  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
  // Copies the feature-to-group mapping structures from another dataset.
  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
  // Creates a validation dataset sharing the given dataset's mappers.
  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
  // Builds gradient/hessian histograms for one leaf over the used features.
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;
  // Fixes one feature's histogram so its totals match the given sums.
  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;
  // Partitions data_indices according to a split on `feature` at `threshold`,
  // delegating to the owning feature group.
  // NOTE(review): the returned count presumably refers to lte_indices —
  // confirm against FeatureGroup::Split.
  inline data_size_t Split(int feature,
                           uint32_t threshold, uint32_t default_bin_for_zero,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, default_bin_for_zero, data_indices, num_data, lte_indices, gt_indices);
  }
  // Bin offset of inner feature i within its group: the group's first
  // sub-feature is offset by 1, all later sub-features by 0.
  // NOTE(review): presumably reserves the group's bin 0 — confirm.
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }
  // Number of bins used by inner feature i.
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }
  // Total number of bins in one feature group.
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }
  // Bin mapper of inner feature i (owned by its feature group).
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }
  // Bin data of the group that stores inner feature i.
  // NOTE(review): returns the whole group's bin data, not a per-feature view.
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }
  // Bin data of a feature group, addressed by group index.
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }
  // Iterator over the bins of inner feature i.
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }
  // Iterator over all bins of one feature group.
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }
  // Converts a bin index of inner feature i back to a real feature value.
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }
  // Creates one OrderedBin per feature group, in parallel. Exceptions thrown
  // inside the OpenMP loop are captured per-iteration (OMP_LOOP_EX_*) and
  // rethrown after the loop by OMP_THROW_EX.
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }
  /*!
  * \brief Get meta data pointer
  * \return Reference to the metadata (labels, weights, queries, ...)
  */
  inline const Metadata& metadata() const { return metadata_; }
  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }
  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}
  /*! \brief Get Number of total features (used and unused columns) */
  inline int num_total_features() const { return num_total_features_; }
  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }
  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name: feature_names_){
if (feature_name.find(' ') != std::string::npos){
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName){
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }
  /*! \brief Disable copy assignment */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy construction */
  Dataset(const Dataset&) = delete;
 private:
  /*! \brief Data file name (assumed from the identifier; set during loading) */
  const char* data_filename_;
  /*! \brief Store used features, organized into feature groups */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index (negative if unused) */
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token identifying the binary dataset file format (used with
   *  SaveBinaryFile; previous comment duplicated "store feature names") */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Mapper from inner feature index to real (column) feature index */
  std::vector<int> real_feature_idx_;
  /*! \brief Mapper from inner feature index to its feature group */
  std::vector<int> feature2group_;
  /*! \brief Mapper from inner feature index to its position inside the group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries per group; back() is the total bin count */
  std::vector<uint64_t> group_bin_boundaries_;
  /*! \brief First inner feature index of each group (assumed from name) */
  std::vector<int> group_feature_start_;
  /*! \brief Number of inner features per group (assumed from name) */
  std::vector<int> group_feature_cnt_;
  /*! \brief True once loading finished; Push* calls become no-ops */
  bool is_finish_load_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_H_
|
nodes.c | #include "nodes.h"
#include "../../../comms.h"
#include "../../../profiler.h"
#include "../../nodes_data.h"
#include "../../nodes_interface.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Solve the unstructured diffusion problem.
// Drives a conjugate-gradient iteration for the diffusion equation on the
// unstructured mesh; outputs the iteration count (*end_niters) and the final
// squared residual (*end_error).
// NOTE(review): reduce_array is accepted but never used in this routine —
// confirm whether it is still required by the interface.
void solve_unstructured_diffusion_2d(
    const int nx, const int ny, const int pad, Mesh* mesh,
    NodesMesh* nmesh, const int max_inners, const double dt,
    const double heat_capacity, const double conductivity, double* temperature,
    double* b, double* r, double* p, double* rho, double* Ap, int* end_niters,
    double* end_error, double* reduce_array) {
  // Store initial residual
  calculate_rhs(
      nx, ny, pad, heat_capacity, conductivity, dt, nmesh->volume,
      rho, temperature, b, nmesh->edge_vertex0,
      nmesh->edge_vertex1, nmesh->cell_centroids_x,
      nmesh->cell_centroids_y, nmesh->vertices_x,
      nmesh->vertices_y, nmesh->cells_edges,
      nmesh->edges_cells);
  // Sets r = b - A.T(guess) and p = r; returns the local squared residual.
  double local_old_r2 = initialise_cg(
      nx, ny, pad, dt, conductivity, heat_capacity, p, r, temperature,
      nmesh->volume, b, rho, nmesh->cells_edges,
      nmesh->edge_vertex0, nmesh->edge_vertex1,
      nmesh->vertices_x, nmesh->vertices_y,
      nmesh->cell_centroids_x, nmesh->cell_centroids_y,
      nmesh->edges_cells);
  double global_old_r2 = reduce_all_sum(local_old_r2);
  // Exchange halos for the search direction and the current guess.
  handle_boundary_2d(nx, ny, mesh, p, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, temperature, NO_INVERT, PACK);
  // TODO: Can one of the allreduces be removed with kernel fusion?
  int ii = 0;
  for (ii = 0; ii < max_inners; ++ii) {
    // alpha = (r . r) / (p . A p)
    const double local_pAp = calculate_pAp(
        nx, ny, pad, p, Ap, dt, conductivity, heat_capacity, temperature,
        nmesh->volume, rho, nmesh->cells_edges,
        nmesh->edge_vertex0, nmesh->edge_vertex1,
        nmesh->vertices_x, nmesh->vertices_y,
        nmesh->cell_centroids_x,
        nmesh->cell_centroids_y, nmesh->edges_cells);
    const double global_pAp = reduce_all_sum(local_pAp);
    const double alpha = global_old_r2 / global_pAp;
    // Advance the solution and residual, getting the new squared residual.
    const double local_new_r2 =
        calculate_new_r2(nx, ny, pad, alpha, temperature, p, r, Ap);
    const double global_new_r2 = reduce_all_sum(local_new_r2);
    const double beta = global_new_r2 / global_old_r2;
    handle_boundary_2d(nx, ny, mesh, temperature, NO_INVERT, PACK);
    // Check if the solution has converged
    if (fabs(global_new_r2) < EPS) {
      global_old_r2 = global_new_r2;
      break;
    }
    // p = r + beta * p
    update_conjugate(nx, ny, pad, beta, r, p);
    handle_boundary_2d(nx, ny, mesh, p, NO_INVERT, PACK);
    // Store the old squared residual
    global_old_r2 = global_new_r2;
  }
  *end_niters = ii;
  *end_error = global_old_r2;
}
// Calculate the RHS including the unstructured correction term.
// For each interior cell: fits cell-centered temperature gradients by least
// squares over the cell's edges, then writes
//   b = T + (dt / (rho * V)) * tau
// where tau is the cross-diffusion correction built from the gradients.
// NOTE(review): assumes edges_cells holds a valid neighbour for every edge of
// an interior cell — confirm halo/boundary cells are set up accordingly.
void calculate_rhs(const int nx, const int ny, const int pad,
                   const double heat_capacity, const double conductivity,
                   const double dt, const double* volume, const double* rho,
                   const double* temperature, double* b,
                   const int* edge_vertex0, const int* edge_vertex1,
                   const double* cell_centroids_x,
                   const double* cell_centroids_y, const double* vertices_x,
                   const double* vertices_y, const int* cells_edges,
                   const int* edges_cells) {
  /*
     Note here that the temperature is the guessed temperature.
     */
  // Find the RHS that includes the unstructured mesh correction
  #pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
    #pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Fetch the cell centered values
      const int cell_index = (ii)*nx + (jj);
      const double density = rho[(cell_index)];
      const double V = volume[(cell_index)];
      // Calculate the cell centroids
      const double cell_centroid_x = cell_centroids_x[(cell_index)];
      const double cell_centroid_y = cell_centroids_y[(cell_index)];
      /*
       * Performing least squares approximation to get unknown d
       * d = [ dphi/dx, dphi/dy ]
       * M = [ (dx0, dx1 ..., dx_ff) (dy0, dy1, ..., dy_ff) ]
       * del(phi) = [ phi1-phi0, phi2-phi0, ..., phi_ff-phi0 ]
       * d = (M^T.M)^(-1).(M^T).del(phi)
       */
      // Calculate the coefficents to matrix M
      double MTM[3] = {0.0}; // Describes the three unique quantities in (M^T.M)
      double MT_del_phi[2] = {0.0};
      double coeff[2] = {0.0};
      // Calculate the coefficients for all edges
      for (int ee = 0; ee < NEDGES; ++ee) {
        const int edge_index = cells_edges[(ee)*nx * ny + (cell_index)];
        // Pick whichever of the edge's two cells is not this one.
        const int neighbour_index =
            (edges_cells[edge_index * NCELLS_PER_EDGE] == cell_index)
                ? edges_cells[edge_index * NCELLS_PER_EDGE + 1]
                : edges_cells[edge_index * NCELLS_PER_EDGE];
        // Calculate the vector pointing between the cell centroids
        double es_x = (cell_centroids_x[(neighbour_index)] - cell_centroid_x);
        double es_y = (cell_centroids_y[(neighbour_index)] - cell_centroid_y);
        const double centroid_distance = sqrt(es_x * es_x + es_y * es_y);
        es_x /= centroid_distance;
        es_y /= centroid_distance;
        // Calculate the edge differentials
        const int vertex0 = edge_vertex0[(edge_index)];
        const int vertex1 = edge_vertex1[(edge_index)];
        // Calculate the area vector, even though vertices aren't ordered well
        double A_x = (vertices_y[vertex1] - vertices_y[vertex0]);
        double A_y = -(vertices_x[vertex1] - vertices_x[vertex0]);
        // Flip the area vector so it points along es.
        if ((A_x * es_x + A_y * es_y) < 0.0) {
          A_x = -A_x;
          A_y = -A_y;
        }
        // Calculate the gradient matrix
        const double phi0 = temperature[(cell_index)];
        const double phi_ff = temperature[(neighbour_index)];
        MTM[0] += es_x * es_x;
        MTM[1] += es_x * es_y;
        MTM[2] += es_y * es_y;
        MT_del_phi[0] += es_x * (phi_ff - phi0);
        MT_del_phi[1] += es_y * (phi_ff - phi0);
        // Calculate the coefficients of transformed shape
        // (harmonic mean of the two cell densities at the shared edge)
        const double density1 = rho[(neighbour_index)];
        const double edge_density =
            (2.0 * density * density1) / (density + density1);
        const double diffusion_coeff =
            conductivity / (edge_density * heat_capacity);
        const double gam = (A_x * A_x + A_y * A_y) / (A_x * es_x + A_y * es_y);
        coeff[0] += diffusion_coeff * (A_x - es_x * gam);
        coeff[1] += diffusion_coeff * (A_y - es_y * gam);
      }
      // Solve the equation for the temperature gradients
      // (2x2 inverse of M^T.M applied to M^T.del(phi))
      const double MTM_det = (1.0 / (MTM[0] * MTM[2] - MTM[1] * MTM[1]));
      const double temp_grad_cell_x =
          MTM_det * (MT_del_phi[0] * MTM[2] - MT_del_phi[1] * MTM[1]);
      const double temp_grad_cell_y =
          MTM_det * (MT_del_phi[1] * MTM[0] - MT_del_phi[0] * MTM[1]);
      // TODO: SHOULD THERE BE A COEFFICIENT FOR TAU?
      const double tau =
          temp_grad_cell_x * coeff[0] + temp_grad_cell_y * coeff[1];
      b[(cell_index)] = temperature[(cell_index)] + (dt / (density * V)) * tau;
    }
  }
}
// Initialises the CG solver.
// Assembles the matrix-free operator coefficients, sets the initial residual
// r = b - A.T(guess) and search direction p = r, and returns the local
// squared residual (summed over interior cells only).
// NOTE(review): the coefficient assembly here is duplicated in
// calculate_pAp — keep the two in sync if either changes.
double initialise_cg(const int nx, const int ny, const int pad, const double dt,
                     const double conductivity, const double heat_capacity,
                     double* p, double* r, const double* temperature,
                     const double* volume, const double* b, const double* rho,
                     const int* cells_edges, const int* edge_vertex0,
                     const int* edge_vertex1, const double* vertices_x,
                     const double* vertices_y, const double* cell_centroids_x,
                     const double* cell_centroids_y, const int* edges_cells) {
  START_PROFILING(&compute_profile);
  // Going to initialise the coefficients here. This ensures that if the
  // density or mesh were changed by another package, that the coefficients
  // are updated accordingly, making performance evaluation fairer.
  double initial_r2 = 0.0;
  #pragma omp parallel for reduction(+ : initial_r2)
  for (int ii = pad; ii < ny - pad; ++ii) {
    #pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      const int cell_index = (ii)*nx + (jj);
      const double density = rho[(cell_index)];
      const double V = volume[(cell_index)];
      // Calculate the cell centroids
      const double cell_centroid_x = cell_centroids_x[(cell_index)];
      const double cell_centroid_y = cell_centroids_y[(cell_index)];
      double neighbour_coeff_total = 0.0;
      double neighbour_contribution = 0.0;
      for (int ee = 0; ee < NEDGES; ++ee) {
        const int edge_index = cells_edges[(ee)*nx * ny + (cell_index)];
        // Pick whichever of the edge's two cells is not this one.
        const int neighbour_index =
            (edges_cells[edge_index * NCELLS_PER_EDGE] == cell_index)
                ? edges_cells[edge_index * NCELLS_PER_EDGE + 1]
                : edges_cells[edge_index * NCELLS_PER_EDGE];
        // Calculate the unit vector pointing between the cell centroids
        double es_x = (cell_centroids_x[(neighbour_index)] - cell_centroid_x);
        double es_y = (cell_centroids_y[(neighbour_index)] - cell_centroid_y);
        const double centroid_distance = sqrt(es_x * es_x + es_y * es_y);
        es_x /= centroid_distance;
        es_y /= centroid_distance;
        // Calculate the edge differentials
        const int vertex0 = edge_vertex0[(edge_index)];
        const int vertex1 = edge_vertex1[(edge_index)];
        // Calculate the area vector, even though vertices aren't ordered well
        double A_x = (vertices_y[vertex1] - vertices_y[vertex0]);
        double A_y = -(vertices_x[vertex1] - vertices_x[vertex0]);
        // Flip the area vector so it points along es.
        if ((A_x * es_x + A_y * es_y) < 0.0) {
          A_x = -A_x;
          A_y = -A_y;
        }
        // Calculate the diffusion coefficient
        // (harmonic mean of the two cell densities at the shared edge)
        const double edge_density = (2.0 * density * rho[(neighbour_index)]) /
                                    (density + rho[(neighbour_index)]);
        const double diffusion_coeff =
            conductivity / (edge_density * heat_capacity);
        const double neighbour_coeff =
            (dt * diffusion_coeff * (A_x * A_x + A_y * A_y)) /
            (V * centroid_distance * (A_x * es_x + A_y * es_y));
        neighbour_contribution +=
            temperature[(neighbour_index)] * neighbour_coeff;
        neighbour_coeff_total += neighbour_coeff;
      }
      // r = b - A.T for this cell; the diagonal is (1 + sum of coefficients).
      r[(cell_index)] = b[(cell_index)] - ((neighbour_coeff_total + 1.0) *
                                               temperature[(cell_index)] -
                                           neighbour_contribution);
      p[(cell_index)] = r[(cell_index)];
      initial_r2 += r[(cell_index)] * r[(cell_index)];
    }
  }
  STOP_PROFILING(&compute_profile, "initialise cg");
  return initial_r2;
}
// Calculates a value for alpha.
// Applies the matrix-free operator to the search direction, Ap = A.p, and
// returns the local dot product p . Ap (interior cells only).
// NOTE(review): the coefficient assembly is duplicated from initialise_cg —
// keep the two in sync if either changes.
double calculate_pAp(const int nx, const int ny, const int pad, double* p,
                     double* Ap, const double dt, const double conductivity,
                     const double heat_capacity, const double* temperature,
                     const double* volume, const double* rho,
                     const int* cells_edges, const int* edge_vertex0,
                     const int* edge_vertex1, const double* vertices_x,
                     const double* vertices_y, const double* cell_centroids_x,
                     const double* cell_centroids_y, const int* edges_cells) {
  START_PROFILING(&compute_profile);
  double pAp = 0.0;
  #pragma omp parallel for reduction(+ : pAp)
  for (int ii = pad; ii < ny - pad; ++ii) {
    #pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      const int cell_index = (ii)*nx + (jj);
      const double density = rho[(cell_index)];
      const double V = volume[(cell_index)];
      // Calculate the cell centroids
      const double cell_centroid_x = cell_centroids_x[(cell_index)];
      const double cell_centroid_y = cell_centroids_y[(cell_index)];
      double neighbour_coeff_total = 0.0;
      double neighbour_contribution = 0.0;
      for (int ee = 0; ee < NEDGES; ++ee) {
        const int edge_index = cells_edges[(ee)*nx * ny + (cell_index)];
        // Pick whichever of the edge's two cells is not this one.
        const int neighbour_index =
            (edges_cells[edge_index * NCELLS_PER_EDGE] == cell_index)
                ? edges_cells[edge_index * NCELLS_PER_EDGE + 1]
                : edges_cells[edge_index * NCELLS_PER_EDGE];
        // Calculate the unit vector pointing between the cell centroids
        double es_x = (cell_centroids_x[(neighbour_index)] - cell_centroid_x);
        double es_y = (cell_centroids_y[(neighbour_index)] - cell_centroid_y);
        const double centroid_distance = sqrt(es_x * es_x + es_y * es_y);
        es_x /= centroid_distance;
        es_y /= centroid_distance;
        // Calculate the edge differentials
        const int vertex0 = edge_vertex0[(edge_index)];
        const int vertex1 = edge_vertex1[(edge_index)];
        // Calculate the area vector, even though vertices aren't ordered well
        double A_x = (vertices_y[vertex1] - vertices_y[vertex0]);
        double A_y = -(vertices_x[vertex1] - vertices_x[vertex0]);
        // Flip the area vector so it points along es.
        if ((A_x * es_x + A_y * es_y) < 0.0) {
          A_x = -A_x;
          A_y = -A_y;
        }
        // Calculate the diffusion coefficient
        // (harmonic mean of the two cell densities at the shared edge)
        const double edge_density = (2.0 * density * rho[(neighbour_index)]) /
                                    (density + rho[(neighbour_index)]);
        const double diffusion_coeff =
            conductivity / (edge_density * heat_capacity);
        const double neighbour_coeff =
            (dt * diffusion_coeff * (A_x * A_x + A_y * A_y)) /
            (V * centroid_distance * (A_x * es_x + A_y * es_y));
        neighbour_contribution += p[(neighbour_index)] * neighbour_coeff;
        neighbour_coeff_total += neighbour_coeff;
      }
      // Ap for this cell; the diagonal is (1 + sum of coefficients).
      Ap[(cell_index)] = ((neighbour_coeff_total + 1.0) * p[(cell_index)] -
                          neighbour_contribution);
      pAp += p[(cell_index)] * Ap[(cell_index)];
    }
  }
  STOP_PROFILING(&compute_profile, "calculate alpha");
  return pAp;
}
// Advances the solution and residual along the search direction:
//   temperature += alpha * p,  r -= alpha * Ap
// over the interior cells, returning the new squared residual.
double calculate_new_r2(const int nx, const int ny, const int pad, double alpha,
                        double* temperature, double* p, double* r, double* Ap) {
  START_PROFILING(&compute_profile);
  double new_r2 = 0.0;
  for (int row = pad; row < ny - pad; ++row) {
    for (int col = pad; col < nx - pad; ++col) {
      const int idx = row * nx + col;
      temperature[idx] += alpha * p[idx];
      r[idx] -= alpha * Ap[idx];
      new_r2 += r[idx] * r[idx];
    }
  }
  STOP_PROFILING(&compute_profile, "calculate new r2");
  return new_r2;
}
// Forms the next conjugate search direction from the residual and the
// previous direction, p = r + beta * p, over the interior cells.
void update_conjugate(const int nx, const int ny, const int pad,
                      const double beta, const double* r, double* p) {
  START_PROFILING(&compute_profile);
  for (int row = pad; row < ny - pad; ++row) {
    for (int col = pad; col < nx - pad; ++col) {
      const int idx = row * nx + col;
      p[idx] = r[idx] + beta * p[idx];
    }
  }
  STOP_PROFILING(&compute_profile, "update conjugate");
}
|
taco-gemm.c | // Generated by the Tensor Algebra Compiler (tensor-compiler.org)
// ./taco "A(i,j) = B(i,k) * C(k,j)" -d=B:100,100 -d=C:100,100 -d=A:100,100 -f=B:dd -f=C:dd -f=A:dd -cuda -write-source=tmp.c
#ifndef TACO_C_HEADERS
#define TACO_C_HEADERS
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <thrust/complex.h>
#define TACO_MIN(_a,_b) ((_a) < (_b) ? (_a) : (_b))
#define TACO_MAX(_a,_b) ((_a) > (_b) ? (_a) : (_b))
#define TACO_DEREF(_a) (((___context___*)(*__ctx__))->_a)
#ifndef TACO_TENSOR_T_DEFINED
#define TACO_TENSOR_T_DEFINED
typedef enum { taco_mode_dense, taco_mode_sparse } taco_mode_t;
typedef struct {
int32_t order; // tensor order (number of modes)
int32_t* dimensions; // tensor dimensions
int32_t csize; // component size
int32_t* mode_ordering; // mode storage ordering
taco_mode_t* mode_types; // mode storage types
uint8_t*** indices; // tensor index data (per mode)
uint8_t* vals; // tensor values
int32_t vals_size; // values array size
} taco_tensor_t;
#endif
#endif
// Checks a CUDA runtime return code: on failure prints the error string with
// the call site and (by default) exits the process with the error code.
// Wrap CUDA runtime calls with gpuErrchk(...) to get file/line context.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      // The process exit status carries the CUDA error value.
      if (abort) exit(code);
   }
}
// CUDA kernel for A(i,j) += B(i,k) * C(k,j): one thread per row i of B,
// each thread accumulating the whole row A(i, :). All tensors are dense,
// row-major, addressed via the linear indices computed below.
__global__
void computeDeviceKernel0(taco_tensor_t *A, taco_tensor_t *B, taco_tensor_t *C){
  int A2_dimension = (int)(A->dimensions[1]);
  double* __restrict__ A_vals = (double*)(A->vals);
  int B1_dimension = (int)(B->dimensions[0]);
  int B2_dimension = (int)(B->dimensions[1]);
  double* __restrict__ B_vals = (double*)(B->vals);
  int C1_dimension = (int)(C->dimensions[0]);
  int C2_dimension = (int)(C->dimensions[1]);
  double* __restrict__ C_vals = (double*)(C->vals);
  int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  // Guard threads past the last row (the grid is rounded up to 256).
  if (i >= B1_dimension) {
    return;
  }
  for (int32_t k = 0; k < C1_dimension; k++) {
    int32_t pB2 = i * B2_dimension + k;    // linear index of B(i, k)
    for (int32_t j = 0; j < C2_dimension; j++) {
      int32_t pA2 = i * A2_dimension + j;  // linear index of A(i, j)
      int32_t pC2 = k * C2_dimension + j;  // linear index of C(k, j)
      A_vals[pA2] = A_vals[pA2] + B_vals[pB2] * C_vals[pC2];
    }
  }
}
// Computes A(i,j) = B(i,k) * C(k,j): zeroes A's (already allocated) values
// on the host, then launches one CUDA thread per row of B.
// Returns 0 on success; CUDA failures abort via gpuErrchk.
int compute(taco_tensor_t *A, taco_tensor_t *B, taco_tensor_t *C) {
  int A1_dimension = (int)(A->dimensions[0]);
  int A2_dimension = (int)(A->dimensions[1]);
  double* __restrict__ A_vals = (double*)(A->vals);
  int B1_dimension = (int)(B->dimensions[0]);
  // Zero the output before accumulation.
  #pragma omp parallel for
  for (int32_t pA = 0; pA < (A1_dimension * A2_dimension); pA++) {
    A_vals[pA] = 0.0;
  }
  computeDeviceKernel0<<<(B1_dimension + 255) / 256, 256>>>(A, B, C);
  // Surface launch failures and asynchronous kernel errors instead of
  // silently ignoring them (the original checked neither).
  gpuErrchk(cudaGetLastError());
  gpuErrchk(cudaDeviceSynchronize());
  return 0;
}
// Allocates CUDA managed (unified) memory for A's values and publishes the
// buffer through A->vals. Always returns 0; allocation failure aborts via
// gpuErrchk.
// NOTE(review): the previous A->vals pointer is read into A_vals and then
// overwritten by cudaMallocManaged without being freed — confirm callers
// pass a tensor whose values are not yet allocated.
int assemble(taco_tensor_t *A, taco_tensor_t *B, taco_tensor_t *C) {
  int A1_dimension = (int)(A->dimensions[0]);
  int A2_dimension = (int)(A->dimensions[1]);
  double* __restrict__ A_vals = (double*)(A->vals);
  gpuErrchk(cudaMallocManaged((void**)&A_vals, sizeof(double) * A1_dimension * A2_dimension));
  A->vals = (uint8_t*)A_vals;
  return 0;
}
// CUDA kernel for A(i,j) += B(i,k) * C(k,j); identical structure to
// computeDeviceKernel0: one thread per row i of B, dense row-major tensors.
__global__
void evaluateDeviceKernel0(taco_tensor_t *A, taco_tensor_t *B, taco_tensor_t *C){
  int A2_dimension = (int)(A->dimensions[1]);
  double* __restrict__ A_vals = (double*)(A->vals);
  int B1_dimension = (int)(B->dimensions[0]);
  int B2_dimension = (int)(B->dimensions[1]);
  double* __restrict__ B_vals = (double*)(B->vals);
  int C1_dimension = (int)(C->dimensions[0]);
  int C2_dimension = (int)(C->dimensions[1]);
  double* __restrict__ C_vals = (double*)(C->vals);
  int32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  // Guard threads past the last row (the grid is rounded up to 256).
  if (i >= B1_dimension) {
    return;
  }
  for (int32_t k = 0; k < C1_dimension; k++) {
    int32_t pB2 = i * B2_dimension + k;    // linear index of B(i, k)
    for (int32_t j = 0; j < C2_dimension; j++) {
      int32_t pA2 = i * A2_dimension + j;  // linear index of A(i, j)
      int32_t pC2 = k * C2_dimension + j;  // linear index of C(k, j)
      A_vals[pA2] = A_vals[pA2] + B_vals[pB2] * C_vals[pC2];
    }
  }
}
// Allocates, zeroes, and computes A(i,j) = B(i,k) * C(k,j) in one call.
// Returns 0 on success; CUDA failures abort via gpuErrchk.
int evaluate(taco_tensor_t *A, taco_tensor_t *B, taco_tensor_t *C) {
  int A1_dimension = (int)(A->dimensions[0]);
  int A2_dimension = (int)(A->dimensions[1]);
  double* __restrict__ A_vals = (double*)(A->vals);
  int B1_dimension = (int)(B->dimensions[0]);
  int32_t A_capacity = A1_dimension * A2_dimension;
  gpuErrchk(cudaMallocManaged((void**)&A_vals, sizeof(double) * A_capacity));
  // BUG FIX: publish the freshly allocated buffer BEFORE launching the
  // kernel. The kernel reads A->vals, so the original code — which assigned
  // A->vals only after cudaDeviceSynchronize() — made the kernel accumulate
  // into the old, never-zeroed buffer while the zeroed one sat unused.
  A->vals = (uint8_t*)A_vals;
  // Zero the output before accumulation.
  #pragma omp parallel for
  for (int32_t pA = 0; pA < A_capacity; pA++) {
    A_vals[pA] = 0.0;
  }
  evaluateDeviceKernel0<<<(B1_dimension + 255) / 256, 256>>>(A, B, C);
  // Check launch and asynchronous kernel errors.
  gpuErrchk(cudaGetLastError());
  gpuErrchk(cudaDeviceSynchronize());
  return 0;
}
|
testocean.c | extern double fabs(double );
typedef int __int32_t;
typedef long long __int64_t;
typedef long unsigned int __darwin_size_t;
typedef long __darwin_time_t;
typedef __int64_t __darwin_off_t;
typedef __int32_t __darwin_suseconds_t;
typedef __darwin_size_t size_t;
struct timeval {
__darwin_time_t tv_sec;
__darwin_suseconds_t tv_usec;
} ;
void exit(int );
void *malloc(size_t __size);
int strcmp(const char *__s1, const char *__s2);
extern int omp_get_thread_num(void );
typedef __darwin_off_t fpos_t;
struct __sbuf {
unsigned char *_base;
int _size;
} ;
struct __sFILEX ;
struct __sFILE {
unsigned char *_p;
int _r;
int _w;
short _flags;
short _file;
struct __sbuf _bf;
int _lbfsize;
void *_cookie;
int ( *_close )(void *);
int ( *_read )(void *, char * , int );
fpos_t ( *_seek )(void *, fpos_t , int );
int ( *_write )(void *, const char * , int );
struct __sbuf _ub;
struct __sFILEX *_extra;
int _ur;
unsigned char _ubuf[3];
unsigned char _nbuf[1];
struct __sbuf _lb;
int _blksize;
fpos_t _offset;
} ;
typedef struct __sFILE FILE;
extern FILE *__stderrp;
int fprintf(FILE *restrict , const char *restrict , ...);
int printf(const char *restrict , ...);
int scanf(const char *restrict , ...);
int sscanf(const char *restrict , const char *restrict , ...);
int gettimeofday(struct timeval *restrict , void *restrict );
/* Returns the smaller of the two integers (a when they are equal). */
int min(int a, int b) {
    return (a <= b) ? a : b;
}
/* Serial relaxation of the n-by-n ocean grid: each interior cell is replaced
 * by the average of itself and its four neighbours until the mean absolute
 * per-cell change drops below tol. Returns the iteration count.
 * NOTE(review): the scratch rows allocated for B are never freed, and because
 * the local A/B pointers are swapped every iteration, the caller's original A
 * buffer may not hold the final values when the loop exits after an odd
 * number of iterations — confirm callers tolerate this (see print_output). */
int simulate_ocean_currents(double **A, int n , double tol) {
    int done = 0;
    double diff;
    double old;
    int iter = 0;
    double **B;
    double **C;
    unsigned long int _imopVarPre146;
    void *_imopVarPre147;
    /* Allocate the scratch grid B as a copy of A. */
    _imopVarPre146 = n * sizeof(double *);
    _imopVarPre147 = malloc(_imopVarPre146);
    B = (double **) _imopVarPre147;
    int k;
    for (k = 0; k < n; k++) {
        unsigned long int _imopVarPre150;
        void *_imopVarPre151;
        _imopVarPre150 = n * sizeof(double);
        _imopVarPre151 = malloc(_imopVarPre150);
        B[k] = (double *) _imopVarPre151;
        double *_imopVarPre159;
        unsigned int _imopVarPre160;
        unsigned long int _imopVarPre161;
        double *_imopVarPre162;
        double *_imopVarPre163;
        _imopVarPre159 = B[k];
        _imopVarPre160 = __builtin_object_size(_imopVarPre159, 0);
        _imopVarPre161 = n * sizeof(double);
        _imopVarPre162 = A[k];
        _imopVarPre163 = B[k];
        __builtin___memcpy_chk(_imopVarPre163, _imopVarPre162, _imopVarPre161, _imopVarPre160);
    }
    while (!done) {
        iter++;
        diff = 0;
        int i;
        int j;
        /* Five-point average over the interior, accumulating |change|. */
        for (i = 1; i < n - 1; ++i) {
            for (j = 1; j < n - 1; ++j) {
                old = A[i][j];
                B[i][j] = (A[i][j] + A[i][j - 1] + A[i - 1][j] + A[i][j + 1] + A[i + 1][j]) / 5.0;
                double _imopVarPre165;
                double _imopVarPre166;
                _imopVarPre165 = B[i][j] - old;
                _imopVarPre166 = fabs(_imopVarPre165);
                diff += _imopVarPre166;
            }
        }
        /* Rotate the buffers so the freshly written grid becomes A. */
        C = A;
        A = B;
        B = C;
        /* Converged when the mean per-cell change falls below tol. */
        if (diff / (n * n) < tol) {
            done = 1;
        }
    }
    return iter;
}
/* Parallel version of the ocean relaxation using `procs` OpenMP threads.
 * The first parallel region builds the scratch grid B (each thread copies a
 * contiguous range of rows); the second runs the relaxation with explicit
 * barriers so every thread observes a consistent diff/A/B each iteration.
 * Returns the iteration count.
 * NOTE(review): as in the serial version, B's rows are never freed and the
 * A/B pointer swap means the caller's buffer may not hold the final values. */
int simulate_ocean_currents_parallel(double **A, int dim , double tol , int procs) {
    double **B;
    double **C;
    /* Rows of interior work assigned to each thread. */
    int chunk = 1 + (dim - 3) / procs;
    int done = 0;
    int iter = 0;
    double diff = 0;
    #pragma omp parallel num_threads(procs) shared(A, B, dim)
    {
        void *_imopVarPre170;
        unsigned long int _imopVarPre169;
        /* Only one thread allocates the row-pointer array ... */
        #pragma omp master
        {
            _imopVarPre169 = dim * sizeof(double *);
            _imopVarPre170 = malloc(_imopVarPre169);
            B = (double **) _imopVarPre170;
        }
        /* ... and everyone waits for it before filling their rows. */
        // #pragma omp dummyFlush BARRIER_START written([globalCell]) read([globalCell])
        #pragma omp barrier
        int _imopVarPre171;
        _imopVarPre171 = omp_get_thread_num();
        int tid = _imopVarPre171;
        int _imopVarPre173;
        int _imopVarPre174;
        _imopVarPre173 = tid * dim / procs;
        _imopVarPre174 = min(dim, _imopVarPre173);
        int start = _imopVarPre174;
        int _imopVarPre176;
        int _imopVarPre177;
        _imopVarPre176 = (tid + 1) * dim / procs;
        _imopVarPre177 = min(dim, _imopVarPre176);
        int end = _imopVarPre177;
        int i;
        /* Each thread allocates and copies its own slice of rows. */
        for (i = start; i < end; ++i) {
            unsigned long int _imopVarPre180;
            void *_imopVarPre181;
            _imopVarPre180 = dim * sizeof(double);
            _imopVarPre181 = malloc(_imopVarPre180);
            B[i] = (double *) _imopVarPre181;
            double *_imopVarPre189;
            unsigned int _imopVarPre190;
            unsigned long int _imopVarPre191;
            double *_imopVarPre192;
            double *_imopVarPre193;
            _imopVarPre189 = B[i];
            _imopVarPre190 = __builtin_object_size(_imopVarPre189, 0);
            _imopVarPre191 = dim * sizeof(double);
            _imopVarPre192 = A[i];
            _imopVarPre193 = B[i];
            __builtin___memcpy_chk(_imopVarPre193, _imopVarPre192, _imopVarPre191, _imopVarPre190);
        }
    }
    /* `done` is firstprivate: every thread recomputes it identically from the
       shared diff after the barrier below. */
    #pragma omp parallel num_threads(procs) firstprivate(done)
    {
        int _imopVarPre194;
        _imopVarPre194 = omp_get_thread_num();
        int tid = _imopVarPre194;
        int _imopVarPre198;
        int _imopVarPre199;
        int _imopVarPre200;
        /* Interior row range [start, end) for this thread, clamped to dim-2. */
        _imopVarPre198 = tid * chunk;
        _imopVarPre199 = dim - 2;
        _imopVarPre200 = min(_imopVarPre199, _imopVarPre198);
        int start = 1 + _imopVarPre200;
        int _imopVarPre204;
        int _imopVarPre205;
        int _imopVarPre206;
        _imopVarPre204 = (tid + 1) * chunk;
        _imopVarPre205 = dim - 2;
        _imopVarPre206 = min(_imopVarPre205, _imopVarPre204);
        int end = 1 + _imopVarPre206;
        double old;
        double mydiff;
        int i;
        int j;
        while (!done) {
            /*A nowait clause was added to this construct to make its barrier explicit.*/
            #pragma omp single nowait
            {
                iter++;
            }
            /* Every thread writes 0; the barrier orders this before the
               atomic accumulation below. */
            diff = 0;
            // #pragma omp dummyFlush BARRIER_START written([globalCell]) read([globalCell])
            #pragma omp barrier
            mydiff = 0;
            /* Five-point average over this thread's rows, accumulating the
               local absolute change. */
            for (i = start; i < end; ++i) {
                for (j = 1; j < dim - 1; ++j) {
                    old = A[i][j];
                    B[i][j] = (A[i][j] + A[i][j - 1] + A[i - 1][j] + A[i][j + 1] + A[i + 1][j]) / 5.0;
                    double _imopVarPre208;
                    double _imopVarPre209;
                    _imopVarPre208 = B[i][j] - old;
                    _imopVarPre209 = fabs(_imopVarPre208);
                    mydiff += _imopVarPre209;
                }
            }
            // #pragma omp dummyFlush ATOMIC_START written([globalCell]) read([diff])
            #pragma omp atomic
            diff += mydiff;
            // #pragma omp dummyFlush ATOMIC_END written([diff]) read([])
            // #pragma omp dummyFlush BARRIER_START written([]) read([C, B, A, dim, diff, tol])
            #pragma omp barrier
            /* All threads see the fully accumulated diff here. */
            done = diff / (dim * dim) < tol;
            /*A nowait clause was added to this construct to make its barrier explicit.*/
            #pragma omp single nowait
            {
                C = A;
                A = B;
                B = C;
            }
            // #pragma omp dummyFlush BARRIER_START written([C, B, A]) read([globalCell])
            /*This explicit barrier was added as a replacement for some implicit barier.*/
            #pragma omp barrier
        }
    }
    return iter;
}
/* Allocates an n-by-n matrix of doubles and fills it row by row from
 * standard input. Returns the array of row pointers. */
double **read_input(int n) {
    double **X;
    int i;
    int j;
    X = (double **) malloc(n * sizeof(double *));
    for (i = 0; i < n; ++i) {
        X[i] = (double *) malloc(n * sizeof(double));
        for (j = 0; j < n; ++j) {
            scanf("%lf", &X[i][j]);
        }
    }
    return X;
}
/* Prints the iteration count followed by the n-by-n grid, one row per line,
 * with a trailing blank line. */
void print_output(double **A, int n , int niter) {
    int row;
    int col;
    printf("Number of iterations = %d\n", niter);
    for (row = 0; row < n; ++row) {
        for (col = 0; col < n; ++col) {
            printf("%lf ", A[row][col]);
        }
        printf("\n");
    }
    printf("\n");
}
/* Reports wall-clock start, end, and elapsed time in seconds. */
void print_statistics(struct timeval start_time, struct timeval end_time) {
    const double begin_s = start_time.tv_sec + (start_time.tv_usec / 1000000.0);
    const double finish_s = end_time.tv_sec + (end_time.tv_usec / 1000000.0);
    const double elapsed_s =
        end_time.tv_sec - start_time.tv_sec +
        ((end_time.tv_usec - start_time.tv_usec) / 1000000.0);
    printf("Start time:\t%lf \n", begin_s);
    printf("End time:\t%lf\n", finish_s);
    printf("Total time: \t%lf (s)\n", elapsed_s);
}
/* Prints a usage message to stderr and terminates the process with status 1.
 * Never returns. */
void print_usage_and_exit(char *prog) {
    fprintf(__stderrp, "Usage: %s <nprocs> <tol> <-serial|-parallel>\n", prog);
    exit(1);
}
/* Entry point: parses <nprocs> <tol> <-serial|-parallel>, reads the grid
 * dimension and values from stdin, runs the requested solver, then prints
 * the result grid and timing statistics.
 * NOTE(review): the matrices allocated by read_input (and the solvers'
 * scratch grids) are never freed; the program relies on process exit.
 * There is no explicit return — C99 main implicitly returns 0. */
int main(int argc, char **argv) {
    struct timeval start_time;
    struct timeval end_time;
    int num_iter = 0;
    double tol;
    double **A;
    int procs;
    int dim;
    if (argc != 4) {
        char *_imopVarPre229;
        _imopVarPre229 = argv[0];
        print_usage_and_exit(_imopVarPre229);
    }
    /* Parse the process count and convergence tolerance. */
    int *_imopVarPre232;
    char *_imopVarPre233;
    _imopVarPre232 = &procs;
    _imopVarPre233 = argv[1];
    sscanf(_imopVarPre233, "%d", _imopVarPre232);
    double *_imopVarPre236;
    char *_imopVarPre237;
    _imopVarPre236 = &tol;
    _imopVarPre237 = argv[2];
    sscanf(_imopVarPre237, "%lf", _imopVarPre236);
    /* The third argument must be exactly "-serial" or "-parallel". */
    char *option = argv[3];
    int _imopVarPre238;
    int _imopVarPre248;
    int _imopVarPre249;
    int _imopVarPre250;
    _imopVarPre238 = option == ((void *) 0);
    if (!_imopVarPre238) {
        _imopVarPre248 = strcmp(option, "-serial");
        _imopVarPre249 = _imopVarPre248 != 0;
        if (_imopVarPre249) {
            _imopVarPre250 = strcmp(option, "-parallel");
            _imopVarPre249 = _imopVarPre250 != 0;
        }
        _imopVarPre238 = _imopVarPre249;
    }
    if (_imopVarPre238) {
        char *_imopVarPre252;
        _imopVarPre252 = argv[0];
        print_usage_and_exit(_imopVarPre252);
    }
    printf("Options: Procs = %d, Tol = %lf, Execution%s\n\n", procs, tol, option);
    /* Read the grid size, then the grid itself, from stdin. */
    int *_imopVarPre254;
    _imopVarPre254 = &dim;
    scanf("%d", _imopVarPre254);
    A = read_input(dim);
    /* Time only the solver. */
    void *_imopVarPre257;
    struct timeval *_imopVarPre258;
    _imopVarPre257 = ((void *) 0);
    _imopVarPre258 = &start_time;
    gettimeofday(_imopVarPre258, _imopVarPre257);
    int _imopVarPre260;
    _imopVarPre260 = strcmp(option, "-serial");
    if (_imopVarPre260 == 0) {
        num_iter = simulate_ocean_currents(A, dim, tol);
    } else {
        num_iter = simulate_ocean_currents_parallel(A, dim, tol, procs);
    }
    void *_imopVarPre263;
    struct timeval *_imopVarPre264;
    _imopVarPre263 = ((void *) 0);
    _imopVarPre264 = &end_time;
    gettimeofday(_imopVarPre264, _imopVarPre263);
    print_output(A, dim, num_iter);
    print_statistics(start_time, end_time);
}
|
DX.c | /* Generated by Cython 0.26.1 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_26_1"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000)
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#ifdef __cplusplus
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough) || (defined(__GNUC__) && defined(__attribute__))
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
/* Fallback when the NAN macro is unavailable: fill a float with all-one
   bits, which is a (quiet) NaN bit pattern for IEEE-754 single precision. */
static CYTHON_INLINE float __PYX_NAN() {
  float value;
  memset(&value, 0xFF, sizeof(value));
  return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__DX
#define __PYX_HAVE_API__DX
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
/* strlen() equivalent for NUL-terminated Py_UNICODE buffers. Only compiled
   for Python 2; Python 3 builds alias the CPython-provided Py_UNICODE_strlen
   instead (see the #else branch just below). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    const Py_UNICODE *u_end = u;
    while (*u_end++) ;
    /* u_end overshoots the terminator by one, hence the -1. */
    return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Compiled when c_string_encoding=ascii: determine whether the interpreter's
   default encoding is a superset of ASCII. Sets the module-level flag
   __Pyx_sys_getdefaultencoding_not_ascii. Returns 0 on success, -1 with a
   Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        /* Not literally "ascii": round-trip all 128 ASCII code points through
           the default encoding and verify they come back unchanged. */
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    /* XDECREF: some of these may still be NULL depending on where we failed. */
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Compiled when c_string_encoding=default: cache a private copy of the
   interpreter's default-encoding name in __PYX_DEFAULT_STRING_ENCODING.
   Returns 0 on success, -1 with a Python exception set (or on malloc
   failure) otherwise.

   Fix: the original allocated only strlen(default_encoding_c) bytes and
   then strcpy'd into it, overflowing the heap block by one byte (the NUL
   terminator). Allocate strlen + 1. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
/* No-op that takes a variable's address; presumably used to make the compiler
   treat the variable as initialized/used (silencing warnings) — the (void)
   cast discards ptr without touching it. */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"DX.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":739
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":757
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "DX.pyx":14
* from libc.math cimport exp
*
* ctypedef c_np.float_t DTYPE_t # <<<<<<<<<<<<<<
*
*
*/
typedef __pyx_t_5numpy_float_t __pyx_t_2DX_DTYPE_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
/* Cython-generated attribute lookup helper.  NOTE: this file is Cython
 * output (generated from DX.pyx); edits here will be lost on regeneration.
 * When type slots are available, fetch obj.attr_name directly through the
 * type's getattr slots, falling back to the generic PyObject_GetAttr.
 * Returns a new reference, or NULL with an exception set on failure. */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
/* Prefer the tp_getattro slot (takes the attribute as a PyObject*). */
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
/* Python 2 only: some types implement just the char*-based tp_getattr. */
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
/* No usable slot: fall back to the generic C-API lookup. */
return PyObject_GetAttr(obj, attr_name);
}
#else
/* Without type-slot access, the helper degrades to the plain C-API call. */
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
#define __Pyx_BufPtrStrided3d(type, buf, i0, s0, i1, s1, i2, s2) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2)
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
#define __Pyx_BufPtrStrided4d(type, buf, i0, s0, i1, s1, i2, s2, i3, s3) (type)((char*)buf + i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3)
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
/* Cython-generated equivalent of Python's d[key] for dicts.
 * Unlike PyDict_GetItemWithError (which returns a borrowed reference and
 * NULL-without-exception on a missing key), this returns a NEW reference
 * and raises KeyError(key) when the key is absent, matching Python
 * subscript semantics.  On PyPy / Python 2 it falls back to the generic
 * PyObject_GetItem, which already behaves that way. */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
/* NULL with no pending error means "key not found": synthesize the
 * KeyError that a Python-level d[key] would raise.  The key is wrapped
 * in a 1-tuple so tuple keys are reported correctly. */
if (!PyErr_Occurred()) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args))
PyErr_SetObject(PyExc_KeyError, args);
Py_XDECREF(args);
}
return NULL;
}
/* Promote the borrowed reference to a new one for the caller. */
Py_INCREF(value);
return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CLineInTraceback.proto */
static int __Pyx_CLineForTraceback(int c_line);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* None.proto */
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* TypeImport.proto */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'cython' */
/* Module declarations from 'libc.math' */
/* Module declarations from 'DX' */
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_2DX_DTYPE_t), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int_t = { "int_t", NULL, sizeof(__pyx_t_5numpy_int_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int_t), 0 };
#define __Pyx_MODULE_NAME "DX"
int __pyx_module_is_main_DX = 0;
/* Implementation of 'DX' */
static PyObject *__pyx_builtin_xrange;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_ImportError;
static const char __pyx_k_L[] = "L";
static const char __pyx_k_U[] = "U";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_j[] = "j";
static const char __pyx_k_k[] = "k";
static const char __pyx_k_m[] = "m";
static const char __pyx_k_n[] = "n";
static const char __pyx_k_DX[] = "DX";
static const char __pyx_k_Pr[] = "Pr";
static const char __pyx_k_ni[] = "ni";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_D_n[] = "D_n";
static const char __pyx_k_X_0[] = "X_0";
static const char __pyx_k_gap[] = "gap";
static const char __pyx_k_DX_h[] = "DX_h";
static const char __pyx_k_Pr_n[] = "Pr_n";
static const char __pyx_k_aux1[] = "aux1";
static const char __pyx_k_aux2[] = "aux2";
static const char __pyx_k_beta[] = "beta";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_DTYPE[] = "DTYPE";
static const char __pyx_k_alpha[] = "alpha";
static const char __pyx_k_delta[] = "delta";
static const char __pyx_k_dtype[] = "dtype";
static const char __pyx_k_float[] = "float";
static const char __pyx_k_lamda[] = "lamda";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_sum_i[] = "sum_i";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_DX_pyx[] = "DX.pyx";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_nZones[] = "nZones";
static const char __pyx_k_sum_mi[] = "sum_mi";
static const char __pyx_k_xrange[] = "xrange";
static const char __pyx_k_delta_mn[] = "delta_mn";
static const char __pyx_k_nSectors[] = "nSectors";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_cython_DX_h[] = "cython_DX_h";
static const char __pyx_k_cython_DX_n[] = "cython_DX_n";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_genflux_sectors[] = "genflux_sectors";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_n_s_DTYPE;
static PyObject *__pyx_n_s_DX;
static PyObject *__pyx_n_s_DX_h;
static PyObject *__pyx_kp_s_DX_pyx;
static PyObject *__pyx_n_s_D_n;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_n_s_L;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_Pr;
static PyObject *__pyx_n_s_Pr_n;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_U;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_X_0;
static PyObject *__pyx_n_s_alpha;
static PyObject *__pyx_n_s_aux1;
static PyObject *__pyx_n_s_aux2;
static PyObject *__pyx_n_s_beta;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_cython_DX_h;
static PyObject *__pyx_n_s_cython_DX_n;
static PyObject *__pyx_n_s_delta;
static PyObject *__pyx_n_s_delta_mn;
static PyObject *__pyx_n_s_dtype;
static PyObject *__pyx_n_s_float;
static PyObject *__pyx_n_s_gap;
static PyObject *__pyx_n_s_genflux_sectors;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_k;
static PyObject *__pyx_n_s_lamda;
static PyObject *__pyx_n_s_m;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_nSectors;
static PyObject *__pyx_n_s_nZones;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_ni;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_sum_i;
static PyObject *__pyx_n_s_sum_mi;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_xrange;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_2DX_cython_DX_h(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Pr, PyArrayObject *__pyx_v_lamda, PyArrayObject *__pyx_v_beta, PyArrayObject *__pyx_v_U, PyArrayObject *__pyx_v_alpha, PyArrayObject *__pyx_v_gap, PyArrayObject *__pyx_v_delta, PyArrayObject *__pyx_v_X_0, int __pyx_v_nSectors, int __pyx_v_nZones, PyArrayObject *__pyx_v_genflux_sectors); /* proto */
static PyObject *__pyx_pf_2DX_2cython_DX_n(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_DX, CYTHON_UNUSED int __pyx_v_nSectors, int __pyx_v_nZones, __pyx_t_2DX_DTYPE_t __pyx_v_beta, __pyx_t_2DX_DTYPE_t __pyx_v_lamda, PyArrayObject *__pyx_v_D_n, PyArrayObject *__pyx_v_Pr_n); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_codeobj__11;
static PyObject *__pyx_codeobj__13;
/* "DX.pyx":19
*
* @cython.boundscheck(False) # turn of bounds-checking for entire function
* def cython_DX_h(c_np.ndarray[DTYPE_t, ndim=3] Pr, # <<<<<<<<<<<<<<
* c_np.ndarray[DTYPE_t, ndim=1] lamda,
* c_np.ndarray[DTYPE_t, ndim=1] beta,
*/
/* Python wrapper */
/* Cython-generated METH_VARARGS|METH_KEYWORDS entry point for
 * DX.cython_DX_h (declared at DX.pyx:19).  It unpacks the 11 required
 * arguments (positional and/or keyword), converts nSectors/nZones to C
 * ints, type-checks the ndarray arguments, and forwards everything to the
 * generated implementation __pyx_pf_2DX_cython_DX_h.  Do not hand-edit:
 * this file is Cython output and will be regenerated from DX.pyx. */
static PyObject *__pyx_pw_2DX_1cython_DX_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_2DX_1cython_DX_h = {"cython_DX_h", (PyCFunction)__pyx_pw_2DX_1cython_DX_h, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_2DX_1cython_DX_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_Pr = 0;
PyArrayObject *__pyx_v_lamda = 0;
PyArrayObject *__pyx_v_beta = 0;
PyArrayObject *__pyx_v_U = 0;
PyArrayObject *__pyx_v_alpha = 0;
PyArrayObject *__pyx_v_gap = 0;
PyArrayObject *__pyx_v_delta = 0;
PyArrayObject *__pyx_v_X_0 = 0;
int __pyx_v_nSectors;
int __pyx_v_nZones;
PyArrayObject *__pyx_v_genflux_sectors = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cython_DX_h (wrapper)", 0);
{
/* Argument names in declaration order; 0-terminated for the keyword parser. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_Pr,&__pyx_n_s_lamda,&__pyx_n_s_beta,&__pyx_n_s_U,&__pyx_n_s_alpha,&__pyx_n_s_gap,&__pyx_n_s_delta,&__pyx_n_s_X_0,&__pyx_n_s_nSectors,&__pyx_n_s_nZones,&__pyx_n_s_genflux_sectors,0};
PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* First pass: copy whatever positional arguments were given into
 * values[]; the switch intentionally falls through so all earlier
 * positions are captured too. */
switch (pos_args) {
case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
CYTHON_FALLTHROUGH;
case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
CYTHON_FALLTHROUGH;
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
CYTHON_FALLTHROUGH;
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
CYTHON_FALLTHROUGH;
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Second pass: every slot NOT filled positionally must be supplied by
 * keyword (all 11 arguments are required); each hit decrements kw_args. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Pr)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_lamda)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 1); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_beta)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 2); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_U)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 3); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 4); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_gap)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 5); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_delta)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 6); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 7:
if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_X_0)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 7); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 8:
if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nSectors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 8); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 9:
if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nZones)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 9); __PYX_ERR(0, 19, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 10:
if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_genflux_sectors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, 10); __PYX_ERR(0, 19, __pyx_L3_error)
}
}
/* Leftover keywords are either duplicates of positional args or unknown
 * names; the parser raises the appropriate TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "cython_DX_h") < 0)) __PYX_ERR(0, 19, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 11) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: no keywords and exactly 11 positional arguments. */
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
}
/* Bind the collected objects to typed locals; nSectors/nZones go through
 * __Pyx_PyInt_As_int, which reports overflow/non-integer via PyErr. */
__pyx_v_Pr = ((PyArrayObject *)values[0]);
__pyx_v_lamda = ((PyArrayObject *)values[1]);
__pyx_v_beta = ((PyArrayObject *)values[2]);
__pyx_v_U = ((PyArrayObject *)values[3]);
__pyx_v_alpha = ((PyArrayObject *)values[4]);
__pyx_v_gap = ((PyArrayObject *)values[5]);
__pyx_v_delta = ((PyArrayObject *)values[6]);
__pyx_v_X_0 = ((PyArrayObject *)values[7]);
__pyx_v_nSectors = __Pyx_PyInt_As_int(values[8]); if (unlikely((__pyx_v_nSectors == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error)
__pyx_v_nZones = __Pyx_PyInt_As_int(values[9]); if (unlikely((__pyx_v_nZones == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 28, __pyx_L3_error)
__pyx_v_genflux_sectors = ((PyArrayObject *)values[10]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
/* Wrong number of positional arguments: expected exactly 11. */
__Pyx_RaiseArgtupleInvalid("cython_DX_h", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 19, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("DX.cython_DX_h", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Type-check every array argument against numpy.ndarray (the third
 * argument to __Pyx_ArgTypeTest is its none_allowed flag, here 1 — see
 * the ArgTypeTest prototype). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Pr), __pyx_ptype_5numpy_ndarray, 1, "Pr", 0))) __PYX_ERR(0, 19, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_lamda), __pyx_ptype_5numpy_ndarray, 1, "lamda", 0))) __PYX_ERR(0, 20, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_beta), __pyx_ptype_5numpy_ndarray, 1, "beta", 0))) __PYX_ERR(0, 21, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_U), __pyx_ptype_5numpy_ndarray, 1, "U", 0))) __PYX_ERR(0, 22, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_alpha), __pyx_ptype_5numpy_ndarray, 1, "alpha", 0))) __PYX_ERR(0, 23, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_gap), __pyx_ptype_5numpy_ndarray, 1, "gap", 0))) __PYX_ERR(0, 24, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_delta), __pyx_ptype_5numpy_ndarray, 1, "delta", 0))) __PYX_ERR(0, 25, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X_0), __pyx_ptype_5numpy_ndarray, 1, "X_0", 0))) __PYX_ERR(0, 26, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_genflux_sectors), __pyx_ptype_5numpy_ndarray, 1, "genflux_sectors", 0))) __PYX_ERR(0, 29, __pyx_L1_error)
/* Delegate to the generated implementation of the Python function body. */
__pyx_r = __pyx_pf_2DX_cython_DX_h(__pyx_self, __pyx_v_Pr, __pyx_v_lamda, __pyx_v_beta, __pyx_v_U, __pyx_v_alpha, __pyx_v_gap, __pyx_v_delta, __pyx_v_X_0, __pyx_v_nSectors, __pyx_v_nZones, __pyx_v_genflux_sectors);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_2DX_cython_DX_h(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_Pr, PyArrayObject *__pyx_v_lamda, PyArrayObject *__pyx_v_beta, PyArrayObject *__pyx_v_U, PyArrayObject *__pyx_v_alpha, PyArrayObject *__pyx_v_gap, PyArrayObject *__pyx_v_delta, PyArrayObject *__pyx_v_X_0, int __pyx_v_nSectors, int __pyx_v_nZones, PyArrayObject *__pyx_v_genflux_sectors) {
PyArrayObject *__pyx_v_DX_h = 0;
__pyx_t_2DX_DTYPE_t __pyx_v_sum_mi;
__pyx_t_2DX_DTYPE_t __pyx_v_aux1;
__pyx_t_2DX_DTYPE_t __pyx_v_aux2;
__pyx_t_2DX_DTYPE_t __pyx_v_delta_mn;
unsigned int __pyx_v_i;
unsigned int __pyx_v_j;
unsigned int __pyx_v_k;
unsigned int __pyx_v_m;
unsigned int __pyx_v_ni;
CYTHON_UNUSED unsigned int __pyx_v_L;
unsigned int __pyx_v_n;
__Pyx_LocalBuf_ND __pyx_pybuffernd_DX_h;
__Pyx_Buffer __pyx_pybuffer_DX_h;
__Pyx_LocalBuf_ND __pyx_pybuffernd_Pr;
__Pyx_Buffer __pyx_pybuffer_Pr;
__Pyx_LocalBuf_ND __pyx_pybuffernd_U;
__Pyx_Buffer __pyx_pybuffer_U;
__Pyx_LocalBuf_ND __pyx_pybuffernd_X_0;
__Pyx_Buffer __pyx_pybuffer_X_0;
__Pyx_LocalBuf_ND __pyx_pybuffernd_alpha;
__Pyx_Buffer __pyx_pybuffer_alpha;
__Pyx_LocalBuf_ND __pyx_pybuffernd_beta;
__Pyx_Buffer __pyx_pybuffer_beta;
__Pyx_LocalBuf_ND __pyx_pybuffernd_delta;
__Pyx_Buffer __pyx_pybuffer_delta;
__Pyx_LocalBuf_ND __pyx_pybuffernd_gap;
__Pyx_Buffer __pyx_pybuffer_gap;
__Pyx_LocalBuf_ND __pyx_pybuffernd_genflux_sectors;
__Pyx_Buffer __pyx_pybuffer_genflux_sectors;
__Pyx_LocalBuf_ND __pyx_pybuffernd_lamda;
__Pyx_Buffer __pyx_pybuffer_lamda;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyArrayObject *__pyx_t_9 = NULL;
unsigned int __pyx_t_10;
unsigned int __pyx_t_11;
unsigned int __pyx_t_12;
size_t __pyx_t_13;
int __pyx_t_14;
unsigned int __pyx_t_15;
int __pyx_t_16;
unsigned int __pyx_t_17;
int __pyx_t_18;
unsigned int __pyx_t_19;
size_t __pyx_t_20;
size_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
size_t __pyx_t_23;
size_t __pyx_t_24;
size_t __pyx_t_25;
size_t __pyx_t_26;
size_t __pyx_t_27;
size_t __pyx_t_28;
size_t __pyx_t_29;
size_t __pyx_t_30;
int __pyx_t_31;
unsigned int __pyx_t_32;
size_t __pyx_t_33;
size_t __pyx_t_34;
size_t __pyx_t_35;
size_t __pyx_t_36;
size_t __pyx_t_37;
size_t __pyx_t_38;
size_t __pyx_t_39;
size_t __pyx_t_40;
size_t __pyx_t_41;
size_t __pyx_t_42;
size_t __pyx_t_43;
size_t __pyx_t_44;
size_t __pyx_t_45;
size_t __pyx_t_46;
size_t __pyx_t_47;
size_t __pyx_t_48;
size_t __pyx_t_49;
size_t __pyx_t_50;
size_t __pyx_t_51;
size_t __pyx_t_52;
size_t __pyx_t_53;
size_t __pyx_t_54;
size_t __pyx_t_55;
size_t __pyx_t_56;
size_t __pyx_t_57;
size_t __pyx_t_58;
size_t __pyx_t_59;
size_t __pyx_t_60;
size_t __pyx_t_61;
size_t __pyx_t_62;
size_t __pyx_t_63;
size_t __pyx_t_64;
size_t __pyx_t_65;
size_t __pyx_t_66;
size_t __pyx_t_67;
size_t __pyx_t_68;
size_t __pyx_t_69;
size_t __pyx_t_70;
size_t __pyx_t_71;
size_t __pyx_t_72;
size_t __pyx_t_73;
size_t __pyx_t_74;
size_t __pyx_t_75;
size_t __pyx_t_76;
size_t __pyx_t_77;
size_t __pyx_t_78;
size_t __pyx_t_79;
size_t __pyx_t_80;
size_t __pyx_t_81;
size_t __pyx_t_82;
size_t __pyx_t_83;
size_t __pyx_t_84;
size_t __pyx_t_85;
size_t __pyx_t_86;
size_t __pyx_t_87;
size_t __pyx_t_88;
size_t __pyx_t_89;
size_t __pyx_t_90;
size_t __pyx_t_91;
size_t __pyx_t_92;
size_t __pyx_t_93;
size_t __pyx_t_94;
size_t __pyx_t_95;
size_t __pyx_t_96;
size_t __pyx_t_97;
size_t __pyx_t_98;
size_t __pyx_t_99;
size_t __pyx_t_100;
size_t __pyx_t_101;
size_t __pyx_t_102;
__Pyx_RefNannySetupContext("cython_DX_h", 0);
__pyx_pybuffer_DX_h.pybuffer.buf = NULL;
__pyx_pybuffer_DX_h.refcount = 0;
__pyx_pybuffernd_DX_h.data = NULL;
__pyx_pybuffernd_DX_h.rcbuffer = &__pyx_pybuffer_DX_h;
__pyx_pybuffer_Pr.pybuffer.buf = NULL;
__pyx_pybuffer_Pr.refcount = 0;
__pyx_pybuffernd_Pr.data = NULL;
__pyx_pybuffernd_Pr.rcbuffer = &__pyx_pybuffer_Pr;
__pyx_pybuffer_lamda.pybuffer.buf = NULL;
__pyx_pybuffer_lamda.refcount = 0;
__pyx_pybuffernd_lamda.data = NULL;
__pyx_pybuffernd_lamda.rcbuffer = &__pyx_pybuffer_lamda;
__pyx_pybuffer_beta.pybuffer.buf = NULL;
__pyx_pybuffer_beta.refcount = 0;
__pyx_pybuffernd_beta.data = NULL;
__pyx_pybuffernd_beta.rcbuffer = &__pyx_pybuffer_beta;
__pyx_pybuffer_U.pybuffer.buf = NULL;
__pyx_pybuffer_U.refcount = 0;
__pyx_pybuffernd_U.data = NULL;
__pyx_pybuffernd_U.rcbuffer = &__pyx_pybuffer_U;
__pyx_pybuffer_alpha.pybuffer.buf = NULL;
__pyx_pybuffer_alpha.refcount = 0;
__pyx_pybuffernd_alpha.data = NULL;
__pyx_pybuffernd_alpha.rcbuffer = &__pyx_pybuffer_alpha;
__pyx_pybuffer_gap.pybuffer.buf = NULL;
__pyx_pybuffer_gap.refcount = 0;
__pyx_pybuffernd_gap.data = NULL;
__pyx_pybuffernd_gap.rcbuffer = &__pyx_pybuffer_gap;
__pyx_pybuffer_delta.pybuffer.buf = NULL;
__pyx_pybuffer_delta.refcount = 0;
__pyx_pybuffernd_delta.data = NULL;
__pyx_pybuffernd_delta.rcbuffer = &__pyx_pybuffer_delta;
__pyx_pybuffer_X_0.pybuffer.buf = NULL;
__pyx_pybuffer_X_0.refcount = 0;
__pyx_pybuffernd_X_0.data = NULL;
__pyx_pybuffernd_X_0.rcbuffer = &__pyx_pybuffer_X_0;
__pyx_pybuffer_genflux_sectors.pybuffer.buf = NULL;
__pyx_pybuffer_genflux_sectors.refcount = 0;
__pyx_pybuffernd_genflux_sectors.data = NULL;
__pyx_pybuffernd_genflux_sectors.rcbuffer = &__pyx_pybuffer_genflux_sectors;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Pr.rcbuffer->pybuffer, (PyObject*)__pyx_v_Pr, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_Pr.diminfo[0].strides = __pyx_pybuffernd_Pr.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_Pr.diminfo[0].shape = __pyx_pybuffernd_Pr.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_Pr.diminfo[1].strides = __pyx_pybuffernd_Pr.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_Pr.diminfo[1].shape = __pyx_pybuffernd_Pr.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_Pr.diminfo[2].strides = __pyx_pybuffernd_Pr.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_Pr.diminfo[2].shape = __pyx_pybuffernd_Pr.rcbuffer->pybuffer.shape[2];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_lamda.rcbuffer->pybuffer, (PyObject*)__pyx_v_lamda, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_lamda.diminfo[0].strides = __pyx_pybuffernd_lamda.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_lamda.diminfo[0].shape = __pyx_pybuffernd_lamda.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_beta.rcbuffer->pybuffer, (PyObject*)__pyx_v_beta, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_beta.diminfo[0].strides = __pyx_pybuffernd_beta.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_beta.diminfo[0].shape = __pyx_pybuffernd_beta.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_U.rcbuffer->pybuffer, (PyObject*)__pyx_v_U, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_U.diminfo[0].strides = __pyx_pybuffernd_U.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_U.diminfo[0].shape = __pyx_pybuffernd_U.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_U.diminfo[1].strides = __pyx_pybuffernd_U.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_U.diminfo[1].shape = __pyx_pybuffernd_U.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_alpha.rcbuffer->pybuffer, (PyObject*)__pyx_v_alpha, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_alpha.diminfo[0].strides = __pyx_pybuffernd_alpha.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_alpha.diminfo[0].shape = __pyx_pybuffernd_alpha.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_alpha.diminfo[1].strides = __pyx_pybuffernd_alpha.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_alpha.diminfo[1].shape = __pyx_pybuffernd_alpha.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_alpha.diminfo[2].strides = __pyx_pybuffernd_alpha.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_alpha.diminfo[2].shape = __pyx_pybuffernd_alpha.rcbuffer->pybuffer.shape[2];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_gap.rcbuffer->pybuffer, (PyObject*)__pyx_v_gap, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_gap.diminfo[0].strides = __pyx_pybuffernd_gap.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_gap.diminfo[0].shape = __pyx_pybuffernd_gap.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_gap.diminfo[1].strides = __pyx_pybuffernd_gap.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_gap.diminfo[1].shape = __pyx_pybuffernd_gap.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_delta.rcbuffer->pybuffer, (PyObject*)__pyx_v_delta, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_delta.diminfo[0].strides = __pyx_pybuffernd_delta.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_delta.diminfo[0].shape = __pyx_pybuffernd_delta.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_delta.diminfo[1].strides = __pyx_pybuffernd_delta.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_delta.diminfo[1].shape = __pyx_pybuffernd_delta.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_X_0.rcbuffer->pybuffer, (PyObject*)__pyx_v_X_0, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_X_0.diminfo[0].strides = __pyx_pybuffernd_X_0.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_X_0.diminfo[0].shape = __pyx_pybuffernd_X_0.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_X_0.diminfo[1].strides = __pyx_pybuffernd_X_0.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_X_0.diminfo[1].shape = __pyx_pybuffernd_X_0.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_genflux_sectors.rcbuffer->pybuffer, (PyObject*)__pyx_v_genflux_sectors, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 19, __pyx_L1_error)
}
__pyx_pybuffernd_genflux_sectors.diminfo[0].strides = __pyx_pybuffernd_genflux_sectors.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_genflux_sectors.diminfo[0].shape = __pyx_pybuffernd_genflux_sectors.rcbuffer->pybuffer.shape[0];
/* "DX.pyx":31
* c_np.ndarray[c_np.int_t, ndim=1] genflux_sectors):
*
* assert Pr.dtype == DTYPE and lamda.dtype == DTYPE and \ # <<<<<<<<<<<<<<
* beta.dtype == DTYPE and U.dtype == DTYPE and alpha.dtype == DTYPE and \
* gap.dtype == DTYPE and delta.dtype == DTYPE and X_0.dtype == DTYPE and \
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_Pr), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_lamda), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
/* "DX.pyx":32
*
* assert Pr.dtype == DTYPE and lamda.dtype == DTYPE and \
* beta.dtype == DTYPE and U.dtype == DTYPE and alpha.dtype == DTYPE and \ # <<<<<<<<<<<<<<
* gap.dtype == DTYPE and delta.dtype == DTYPE and X_0.dtype == DTYPE and \
* genflux_sectors.dtype == int
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_beta), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_U), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_alpha), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
/* "DX.pyx":33
* assert Pr.dtype == DTYPE and lamda.dtype == DTYPE and \
* beta.dtype == DTYPE and U.dtype == DTYPE and alpha.dtype == DTYPE and \
* gap.dtype == DTYPE and delta.dtype == DTYPE and X_0.dtype == DTYPE and \ # <<<<<<<<<<<<<<
* genflux_sectors.dtype == int
*
*/
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_gap), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_delta), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X_0), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
/* "DX.pyx":34
* beta.dtype == DTYPE and U.dtype == DTYPE and alpha.dtype == DTYPE and \
* gap.dtype == DTYPE and delta.dtype == DTYPE and X_0.dtype == DTYPE and \
* genflux_sectors.dtype == int # <<<<<<<<<<<<<<
*
* cdef c_np.ndarray[DTYPE_t,ndim=4] DX_h = np.zeros((nSectors,nZones,
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_genflux_sectors), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyObject_RichCompare(__pyx_t_2, ((PyObject *)(&PyInt_Type)), Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 34, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 34, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_1 = __pyx_t_5;
__pyx_L3_bool_binop_done:;
if (unlikely(!__pyx_t_1)) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 31, __pyx_L1_error)
}
}
#endif
/* "DX.pyx":36
* genflux_sectors.dtype == int
*
* cdef c_np.ndarray[DTYPE_t,ndim=4] DX_h = np.zeros((nSectors,nZones, # <<<<<<<<<<<<<<
* nSectors,nZones),
* dtype=DTYPE)
*/
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_nSectors); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_nZones); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
/* "DX.pyx":37
*
* cdef c_np.ndarray[DTYPE_t,ndim=4] DX_h = np.zeros((nSectors,nZones,
* nSectors,nZones), # <<<<<<<<<<<<<<
* dtype=DTYPE)
* cdef DTYPE_t sum_mi,aux1,aux2,delta_mn
*/
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_nSectors); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_nZones); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
/* "DX.pyx":36
* genflux_sectors.dtype == int
*
* cdef c_np.ndarray[DTYPE_t,ndim=4] DX_h = np.zeros((nSectors,nZones, # <<<<<<<<<<<<<<
* nSectors,nZones),
* dtype=DTYPE)
*/
__pyx_t_8 = PyTuple_New(4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 3, __pyx_t_7);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GIVEREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_8);
__pyx_t_8 = 0;
/* "DX.pyx":38
* cdef c_np.ndarray[DTYPE_t,ndim=4] DX_h = np.zeros((nSectors,nZones,
* nSectors,nZones),
* dtype=DTYPE) # <<<<<<<<<<<<<<
* cdef DTYPE_t sum_mi,aux1,aux2,delta_mn
* cdef unsigned int i,j,k,m,ni,L,n#, zero_entries
*/
__pyx_t_8 = PyDict_New(); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (PyDict_SetItem(__pyx_t_8, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "DX.pyx":36
* genflux_sectors.dtype == int
*
* cdef c_np.ndarray[DTYPE_t,ndim=4] DX_h = np.zeros((nSectors,nZones, # <<<<<<<<<<<<<<
* nSectors,nZones),
* dtype=DTYPE)
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_7, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 36, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
if (!(likely(((__pyx_t_6) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_6, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 36, __pyx_L1_error)
__pyx_t_9 = ((PyArrayObject *)__pyx_t_6);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_DX_h.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 4, 0, __pyx_stack) == -1)) {
__pyx_v_DX_h = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 36, __pyx_L1_error)
} else {__pyx_pybuffernd_DX_h.diminfo[0].strides = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_DX_h.diminfo[0].shape = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_DX_h.diminfo[1].strides = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_DX_h.diminfo[1].shape = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_DX_h.diminfo[2].strides = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_DX_h.diminfo[2].shape = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.shape[2]; __pyx_pybuffernd_DX_h.diminfo[3].strides = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.strides[3]; __pyx_pybuffernd_DX_h.diminfo[3].shape = __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.shape[3];
}
}
__pyx_t_9 = 0;
__pyx_v_DX_h = ((PyArrayObject *)__pyx_t_6);
__pyx_t_6 = 0;
/* "DX.pyx":41
* cdef DTYPE_t sum_mi,aux1,aux2,delta_mn
* cdef unsigned int i,j,k,m,ni,L,n#, zero_entries
* sum_mi=0.0 # <<<<<<<<<<<<<<
* aux1=0.0
* aux2=0.0
*/
__pyx_v_sum_mi = 0.0;
/* "DX.pyx":42
* cdef unsigned int i,j,k,m,ni,L,n#, zero_entries
* sum_mi=0.0
* aux1=0.0 # <<<<<<<<<<<<<<
* aux2=0.0
* n=0
*/
__pyx_v_aux1 = 0.0;
/* "DX.pyx":43
* sum_mi=0.0
* aux1=0.0
* aux2=0.0 # <<<<<<<<<<<<<<
* n=0
* # zero_entries=0
*/
__pyx_v_aux2 = 0.0;
/* "DX.pyx":44
* aux1=0.0
* aux2=0.0
* n=0 # <<<<<<<<<<<<<<
* # zero_entries=0
* L=genflux_sectors.shape[0]
*/
__pyx_v_n = 0;
/* "DX.pyx":46
* n=0
* # zero_entries=0
* L=genflux_sectors.shape[0] # <<<<<<<<<<<<<<
* for ni in prange(L,nogil=True):
* # for ni in xrange(L):
*/
__pyx_v_L = (__pyx_v_genflux_sectors->dimensions[0]);
/* "DX.pyx":47
* # zero_entries=0
* L=genflux_sectors.shape[0]
* for ni in prange(L,nogil=True): # <<<<<<<<<<<<<<
* # for ni in xrange(L):
* n=genflux_sectors[ni]
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_10 = __pyx_v_L;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_12 = (__pyx_t_10 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_12 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_1, __pyx_t_100, __pyx_t_101, __pyx_t_102, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_28, __pyx_t_29, __pyx_t_30, __pyx_t_31, __pyx_t_32, __pyx_t_33, __pyx_t_34, __pyx_t_35, __pyx_t_36, __pyx_t_37, __pyx_t_38, __pyx_t_39, __pyx_t_40, __pyx_t_41, __pyx_t_42, __pyx_t_43, __pyx_t_44, __pyx_t_45, __pyx_t_46, __pyx_t_47, __pyx_t_48, __pyx_t_49, __pyx_t_50, __pyx_t_51, __pyx_t_52, __pyx_t_53, __pyx_t_54, __pyx_t_55, __pyx_t_56, __pyx_t_57, __pyx_t_58, __pyx_t_59, __pyx_t_60, __pyx_t_61, __pyx_t_62, __pyx_t_63, __pyx_t_64, __pyx_t_65, __pyx_t_66, __pyx_t_67, __pyx_t_68, __pyx_t_69, __pyx_t_70, __pyx_t_71, __pyx_t_72, __pyx_t_73, __pyx_t_74, __pyx_t_75, __pyx_t_76, __pyx_t_77, __pyx_t_78, __pyx_t_79, __pyx_t_80, __pyx_t_81, __pyx_t_82, __pyx_t_83, __pyx_t_84, __pyx_t_85, __pyx_t_86, __pyx_t_87, __pyx_t_88, __pyx_t_89, __pyx_t_90, __pyx_t_91, __pyx_t_92, __pyx_t_93, __pyx_t_94, __pyx_t_95, __pyx_t_96, __pyx_t_97, __pyx_t_98, __pyx_t_99)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_aux1) lastprivate(__pyx_v_aux2) lastprivate(__pyx_v_delta_mn) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_m) lastprivate(__pyx_v_n) firstprivate(__pyx_v_ni) lastprivate(__pyx_v_ni) lastprivate(__pyx_v_sum_mi)
#endif /* _OPENMP */
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_12; __pyx_t_11++){
{
__pyx_v_ni = (unsigned int)(0 + 1 * __pyx_t_11);
/* Initialize private variables to invalid values */
__pyx_v_aux1 = ((__pyx_t_2DX_DTYPE_t)__PYX_NAN());
__pyx_v_aux2 = ((__pyx_t_2DX_DTYPE_t)__PYX_NAN());
__pyx_v_delta_mn = ((__pyx_t_2DX_DTYPE_t)__PYX_NAN());
__pyx_v_i = ((unsigned int)0xbad0bad0);
__pyx_v_j = ((unsigned int)0xbad0bad0);
__pyx_v_k = ((unsigned int)0xbad0bad0);
__pyx_v_m = ((unsigned int)0xbad0bad0);
__pyx_v_n = ((unsigned int)0xbad0bad0);
__pyx_v_sum_mi = ((__pyx_t_2DX_DTYPE_t)__PYX_NAN());
/* "DX.pyx":49
* for ni in prange(L,nogil=True):
* # for ni in xrange(L):
* n=genflux_sectors[ni] # <<<<<<<<<<<<<<
* for j in xrange(nZones):
* for k in xrange(nZones):
*/
__pyx_t_13 = __pyx_v_ni;
__pyx_v_n = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int_t *, __pyx_pybuffernd_genflux_sectors.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_genflux_sectors.diminfo[0].strides));
/* "DX.pyx":50
* # for ni in xrange(L):
* n=genflux_sectors[ni]
* for j in xrange(nZones): # <<<<<<<<<<<<<<
* for k in xrange(nZones):
* sum_mi=0.0
*/
__pyx_t_14 = __pyx_v_nZones;
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_j = __pyx_t_15;
/* "DX.pyx":51
* n=genflux_sectors[ni]
* for j in xrange(nZones):
* for k in xrange(nZones): # <<<<<<<<<<<<<<
* sum_mi=0.0
*
*/
__pyx_t_16 = __pyx_v_nZones;
for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
__pyx_v_k = __pyx_t_17;
/* "DX.pyx":52
* for j in xrange(nZones):
* for k in xrange(nZones):
* sum_mi=0.0 # <<<<<<<<<<<<<<
*
* for m in xrange(nSectors):
*/
__pyx_v_sum_mi = 0.0;
/* "DX.pyx":54
* sum_mi=0.0
*
* for m in xrange(nSectors): # <<<<<<<<<<<<<<
* if alpha[m,n,0]==0:#if there is no consume of n by m )cheking just 1 zone, could be any
* pass
*/
__pyx_t_18 = __pyx_v_nSectors;
for (__pyx_t_19 = 0; __pyx_t_19 < __pyx_t_18; __pyx_t_19+=1) {
__pyx_v_m = __pyx_t_19;
/* "DX.pyx":55
*
* for m in xrange(nSectors):
* if alpha[m,n,0]==0:#if there is no consume of n by m )cheking just 1 zone, could be any # <<<<<<<<<<<<<<
* pass
* else:
*/
__pyx_t_20 = __pyx_v_m;
__pyx_t_21 = __pyx_v_n;
__pyx_t_22 = 0;
if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_pybuffernd_alpha.diminfo[2].shape;
__pyx_t_1 = (((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_alpha.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_alpha.diminfo[0].strides, __pyx_t_21, __pyx_pybuffernd_alpha.diminfo[1].strides, __pyx_t_22, __pyx_pybuffernd_alpha.diminfo[2].strides)) == 0.0) != 0);
if (__pyx_t_1) {
goto __pyx_L25;
}
/* "DX.pyx":58
* pass
* else:
* delta_mn = delta[m,n] # <<<<<<<<<<<<<<
* aux2=lamda[n]*beta[n]
* if gap[m,n]!=0: #this is always cero! demin=demax
*/
/*else*/ {
__pyx_t_23 = __pyx_v_m;
__pyx_t_24 = __pyx_v_n;
__pyx_v_delta_mn = (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_delta.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_delta.diminfo[0].strides, __pyx_t_24, __pyx_pybuffernd_delta.diminfo[1].strides));
/* "DX.pyx":59
* else:
* delta_mn = delta[m,n]
* aux2=lamda[n]*beta[n] # <<<<<<<<<<<<<<
* if gap[m,n]!=0: #this is always cero! demin=demax
* aux1=gap[m,n]*delta_mn
*/
__pyx_t_25 = __pyx_v_n;
__pyx_t_26 = __pyx_v_n;
__pyx_v_aux2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_lamda.rcbuffer->pybuffer.buf, __pyx_t_25, __pyx_pybuffernd_lamda.diminfo[0].strides)) * (*__Pyx_BufPtrStrided1d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_beta.rcbuffer->pybuffer.buf, __pyx_t_26, __pyx_pybuffernd_beta.diminfo[0].strides)));
/* "DX.pyx":60
* delta_mn = delta[m,n]
* aux2=lamda[n]*beta[n]
* if gap[m,n]!=0: #this is always cero! demin=demax # <<<<<<<<<<<<<<
* aux1=gap[m,n]*delta_mn
* for i in xrange(nZones):
*/
__pyx_t_27 = __pyx_v_m;
__pyx_t_28 = __pyx_v_n;
__pyx_t_1 = (((*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_gap.rcbuffer->pybuffer.buf, __pyx_t_27, __pyx_pybuffernd_gap.diminfo[0].strides, __pyx_t_28, __pyx_pybuffernd_gap.diminfo[1].strides)) != 0.0) != 0);
if (__pyx_t_1) {
/* "DX.pyx":61
* aux2=lamda[n]*beta[n]
* if gap[m,n]!=0: #this is always cero! demin=demax
* aux1=gap[m,n]*delta_mn # <<<<<<<<<<<<<<
* for i in xrange(nZones):
* if k==j:
*/
__pyx_t_29 = __pyx_v_m;
__pyx_t_30 = __pyx_v_n;
__pyx_v_aux1 = ((*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_gap.rcbuffer->pybuffer.buf, __pyx_t_29, __pyx_pybuffernd_gap.diminfo[0].strides, __pyx_t_30, __pyx_pybuffernd_gap.diminfo[1].strides)) * __pyx_v_delta_mn);
/* "DX.pyx":62
* if gap[m,n]!=0: #this is always cero! demin=demax
* aux1=gap[m,n]*delta_mn
* for i in xrange(nZones): # <<<<<<<<<<<<<<
* if k==j:
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
*/
__pyx_t_31 = __pyx_v_nZones;
for (__pyx_t_32 = 0; __pyx_t_32 < __pyx_t_31; __pyx_t_32+=1) {
__pyx_v_i = __pyx_t_32;
/* "DX.pyx":63
* aux1=gap[m,n]*delta_mn
* for i in xrange(nZones):
* if k==j: # <<<<<<<<<<<<<<
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
* else:
*/
__pyx_t_1 = ((__pyx_v_k == __pyx_v_j) != 0);
if (__pyx_t_1) {
/* "DX.pyx":64
* for i in xrange(nZones):
* if k==j:
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i] # <<<<<<<<<<<<<<
* else:
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(aux2*Pr[n,i,j]*Pr[n,i,k]))*X_0[m,i]
*/
__pyx_t_33 = __pyx_v_n;
__pyx_t_34 = __pyx_v_i;
__pyx_t_35 = __pyx_v_n;
__pyx_t_36 = __pyx_v_i;
__pyx_t_37 = __pyx_v_k;
__pyx_t_38 = __pyx_v_n;
__pyx_t_39 = __pyx_v_i;
__pyx_t_40 = __pyx_v_j;
__pyx_t_41 = __pyx_v_m;
__pyx_t_42 = __pyx_v_n;
__pyx_t_43 = __pyx_v_i;
__pyx_t_44 = __pyx_v_n;
__pyx_t_45 = __pyx_v_i;
__pyx_t_46 = __pyx_v_j;
__pyx_t_47 = __pyx_v_n;
__pyx_t_48 = __pyx_v_i;
__pyx_t_49 = __pyx_v_j;
__pyx_t_50 = __pyx_v_n;
__pyx_t_51 = __pyx_v_i;
__pyx_t_52 = __pyx_v_j;
__pyx_t_53 = __pyx_v_m;
__pyx_t_54 = __pyx_v_i;
__pyx_v_sum_mi = (__pyx_v_sum_mi + ((((((-__pyx_v_aux1) * exp(((-__pyx_v_delta_mn) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_U.rcbuffer->pybuffer.buf, __pyx_t_33, __pyx_pybuffernd_U.diminfo[0].strides, __pyx_t_34, __pyx_pybuffernd_U.diminfo[1].strides))))) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_35, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_36, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_37, __pyx_pybuffernd_Pr.diminfo[2].strides))) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_38, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_39, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_40, __pyx_pybuffernd_Pr.diminfo[2].strides))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_alpha.rcbuffer->pybuffer.buf, __pyx_t_41, __pyx_pybuffernd_alpha.diminfo[0].strides, __pyx_t_42, __pyx_pybuffernd_alpha.diminfo[1].strides, __pyx_t_43, __pyx_pybuffernd_alpha.diminfo[2].strides)) * ((-__pyx_v_aux2) * ((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_44, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_45, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_46, __pyx_pybuffernd_Pr.diminfo[2].strides)) - ((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_47, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_48, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_49, __pyx_pybuffernd_Pr.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_50, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_51, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_52, __pyx_pybuffernd_Pr.diminfo[2].strides))))))) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_X_0.rcbuffer->pybuffer.buf, __pyx_t_53, __pyx_pybuffernd_X_0.diminfo[0].strides, __pyx_t_54, 
__pyx_pybuffernd_X_0.diminfo[1].strides))));
/* "DX.pyx":63
* aux1=gap[m,n]*delta_mn
* for i in xrange(nZones):
* if k==j: # <<<<<<<<<<<<<<
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
* else:
*/
goto __pyx_L29;
}
/* "DX.pyx":66
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
* else:
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(aux2*Pr[n,i,j]*Pr[n,i,k]))*X_0[m,i] # <<<<<<<<<<<<<<
* else:
* for i in xrange(nZones):
*/
/*else*/ {
__pyx_t_55 = __pyx_v_n;
__pyx_t_56 = __pyx_v_i;
__pyx_t_57 = __pyx_v_n;
__pyx_t_58 = __pyx_v_i;
__pyx_t_59 = __pyx_v_k;
__pyx_t_60 = __pyx_v_n;
__pyx_t_61 = __pyx_v_i;
__pyx_t_62 = __pyx_v_j;
__pyx_t_63 = __pyx_v_m;
__pyx_t_64 = __pyx_v_n;
__pyx_t_65 = __pyx_v_i;
__pyx_t_66 = __pyx_v_n;
__pyx_t_67 = __pyx_v_i;
__pyx_t_68 = __pyx_v_j;
__pyx_t_69 = __pyx_v_n;
__pyx_t_70 = __pyx_v_i;
__pyx_t_71 = __pyx_v_k;
__pyx_t_72 = __pyx_v_m;
__pyx_t_73 = __pyx_v_i;
__pyx_v_sum_mi = (__pyx_v_sum_mi + ((((((-__pyx_v_aux1) * exp(((-__pyx_v_delta_mn) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_U.rcbuffer->pybuffer.buf, __pyx_t_55, __pyx_pybuffernd_U.diminfo[0].strides, __pyx_t_56, __pyx_pybuffernd_U.diminfo[1].strides))))) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_57, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_58, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_59, __pyx_pybuffernd_Pr.diminfo[2].strides))) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_60, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_61, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_62, __pyx_pybuffernd_Pr.diminfo[2].strides))) + ((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_alpha.rcbuffer->pybuffer.buf, __pyx_t_63, __pyx_pybuffernd_alpha.diminfo[0].strides, __pyx_t_64, __pyx_pybuffernd_alpha.diminfo[1].strides, __pyx_t_65, __pyx_pybuffernd_alpha.diminfo[2].strides)) * ((__pyx_v_aux2 * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_66, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_67, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_68, __pyx_pybuffernd_Pr.diminfo[2].strides))) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_69, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_70, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_71, __pyx_pybuffernd_Pr.diminfo[2].strides))))) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_X_0.rcbuffer->pybuffer.buf, __pyx_t_72, __pyx_pybuffernd_X_0.diminfo[0].strides, __pyx_t_73, __pyx_pybuffernd_X_0.diminfo[1].strides))));
}
__pyx_L29:;
}
/* "DX.pyx":60
* delta_mn = delta[m,n]
* aux2=lamda[n]*beta[n]
* if gap[m,n]!=0: #this is always cero! demin=demax # <<<<<<<<<<<<<<
* aux1=gap[m,n]*delta_mn
* for i in xrange(nZones):
*/
goto __pyx_L26;
}
/* "DX.pyx":68
* sum_mi=sum_mi+(-aux1*exp(-delta_mn*U[n,i])*Pr[n,i,k]*Pr[n,i,j]+alpha[m,n,i]*(aux2*Pr[n,i,j]*Pr[n,i,k]))*X_0[m,i]
* else:
* for i in xrange(nZones): # <<<<<<<<<<<<<<
* if k==j:
* sum_mi=sum_mi+(alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
*/
/*else*/ {
__pyx_t_31 = __pyx_v_nZones;
for (__pyx_t_32 = 0; __pyx_t_32 < __pyx_t_31; __pyx_t_32+=1) {
__pyx_v_i = __pyx_t_32;
/* "DX.pyx":69
* else:
* for i in xrange(nZones):
* if k==j: # <<<<<<<<<<<<<<
* sum_mi=sum_mi+(alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
* else:
*/
__pyx_t_1 = ((__pyx_v_k == __pyx_v_j) != 0);
if (__pyx_t_1) {
/* "DX.pyx":70
* for i in xrange(nZones):
* if k==j:
* sum_mi=sum_mi+(alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i] # <<<<<<<<<<<<<<
* else:
* sum_mi=sum_mi+(alpha[m,n,i]*(aux2*Pr[n,i,j]*Pr[n,i,k]))*X_0[m,i]
*/
__pyx_t_74 = __pyx_v_m;
__pyx_t_75 = __pyx_v_n;
__pyx_t_76 = __pyx_v_i;
__pyx_t_77 = __pyx_v_n;
__pyx_t_78 = __pyx_v_i;
__pyx_t_79 = __pyx_v_j;
__pyx_t_80 = __pyx_v_n;
__pyx_t_81 = __pyx_v_i;
__pyx_t_82 = __pyx_v_j;
__pyx_t_83 = __pyx_v_n;
__pyx_t_84 = __pyx_v_i;
__pyx_t_85 = __pyx_v_j;
__pyx_t_86 = __pyx_v_m;
__pyx_t_87 = __pyx_v_i;
__pyx_v_sum_mi = (__pyx_v_sum_mi + (((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_alpha.rcbuffer->pybuffer.buf, __pyx_t_74, __pyx_pybuffernd_alpha.diminfo[0].strides, __pyx_t_75, __pyx_pybuffernd_alpha.diminfo[1].strides, __pyx_t_76, __pyx_pybuffernd_alpha.diminfo[2].strides)) * ((-__pyx_v_aux2) * ((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_77, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_78, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_79, __pyx_pybuffernd_Pr.diminfo[2].strides)) - ((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_80, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_81, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_82, __pyx_pybuffernd_Pr.diminfo[2].strides)) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_83, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_84, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_85, __pyx_pybuffernd_Pr.diminfo[2].strides)))))) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_X_0.rcbuffer->pybuffer.buf, __pyx_t_86, __pyx_pybuffernd_X_0.diminfo[0].strides, __pyx_t_87, __pyx_pybuffernd_X_0.diminfo[1].strides))));
/* "DX.pyx":69
* else:
* for i in xrange(nZones):
* if k==j: # <<<<<<<<<<<<<<
* sum_mi=sum_mi+(alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
* else:
*/
goto __pyx_L32;
}
/* "DX.pyx":72
* sum_mi=sum_mi+(alpha[m,n,i]*(-aux2*(Pr[n,i,j]-Pr[n,i,j]*Pr[n,i,j])))*X_0[m,i]
* else:
* sum_mi=sum_mi+(alpha[m,n,i]*(aux2*Pr[n,i,j]*Pr[n,i,k]))*X_0[m,i] # <<<<<<<<<<<<<<
* DX_h[n,j,n,k]=sum_mi
* # if sum_mi==0:
*/
/*else*/ {
__pyx_t_88 = __pyx_v_m;
__pyx_t_89 = __pyx_v_n;
__pyx_t_90 = __pyx_v_i;
__pyx_t_91 = __pyx_v_n;
__pyx_t_92 = __pyx_v_i;
__pyx_t_93 = __pyx_v_j;
__pyx_t_94 = __pyx_v_n;
__pyx_t_95 = __pyx_v_i;
__pyx_t_96 = __pyx_v_k;
__pyx_t_97 = __pyx_v_m;
__pyx_t_98 = __pyx_v_i;
__pyx_v_sum_mi = (__pyx_v_sum_mi + (((*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_alpha.rcbuffer->pybuffer.buf, __pyx_t_88, __pyx_pybuffernd_alpha.diminfo[0].strides, __pyx_t_89, __pyx_pybuffernd_alpha.diminfo[1].strides, __pyx_t_90, __pyx_pybuffernd_alpha.diminfo[2].strides)) * ((__pyx_v_aux2 * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_91, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_92, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_93, __pyx_pybuffernd_Pr.diminfo[2].strides))) * (*__Pyx_BufPtrStrided3d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr.rcbuffer->pybuffer.buf, __pyx_t_94, __pyx_pybuffernd_Pr.diminfo[0].strides, __pyx_t_95, __pyx_pybuffernd_Pr.diminfo[1].strides, __pyx_t_96, __pyx_pybuffernd_Pr.diminfo[2].strides)))) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_X_0.rcbuffer->pybuffer.buf, __pyx_t_97, __pyx_pybuffernd_X_0.diminfo[0].strides, __pyx_t_98, __pyx_pybuffernd_X_0.diminfo[1].strides))));
}
__pyx_L32:;
}
}
__pyx_L26:;
}
__pyx_L25:;
}
/* "DX.pyx":73
* else:
* sum_mi=sum_mi+(alpha[m,n,i]*(aux2*Pr[n,i,j]*Pr[n,i,k]))*X_0[m,i]
* DX_h[n,j,n,k]=sum_mi # <<<<<<<<<<<<<<
* # if sum_mi==0:
* # zero_entries=zero_entries+1
*/
__pyx_t_99 = __pyx_v_n;
__pyx_t_100 = __pyx_v_j;
__pyx_t_101 = __pyx_v_n;
__pyx_t_102 = __pyx_v_k;
*__Pyx_BufPtrStrided4d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_DX_h.rcbuffer->pybuffer.buf, __pyx_t_99, __pyx_pybuffernd_DX_h.diminfo[0].strides, __pyx_t_100, __pyx_pybuffernd_DX_h.diminfo[1].strides, __pyx_t_101, __pyx_pybuffernd_DX_h.diminfo[2].strides, __pyx_t_102, __pyx_pybuffernd_DX_h.diminfo[3].strides) = __pyx_v_sum_mi;
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "DX.pyx":47
* # zero_entries=0
* L=genflux_sectors.shape[0]
* for ni in prange(L,nogil=True): # <<<<<<<<<<<<<<
* # for ni in xrange(L):
* n=genflux_sectors[ni]
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L14;
}
__pyx_L14:;
}
}
/* "DX.pyx":78
* #print 'finished!'
* # print zero_entries
* return DX_h # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_DX_h));
__pyx_r = ((PyObject *)__pyx_v_DX_h);
goto __pyx_L0;
/* "DX.pyx":19
*
* @cython.boundscheck(False) # turn of bounds-checking for entire function
* def cython_DX_h(c_np.ndarray[DTYPE_t, ndim=3] Pr, # <<<<<<<<<<<<<<
* c_np.ndarray[DTYPE_t, ndim=1] lamda,
* c_np.ndarray[DTYPE_t, ndim=1] beta,
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_DX_h.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Pr.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_U.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X_0.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_alpha.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_beta.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_delta.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gap.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_genflux_sectors.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_lamda.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("DX.cython_DX_h", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_DX_h.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Pr.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_U.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X_0.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_alpha.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_beta.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_delta.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_gap.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_genflux_sectors.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_lamda.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_DX_h);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "DX.pyx":82
*
* @cython.boundscheck(False) # turn of bounds-checking for entire function
* def cython_DX_n(c_np.ndarray[DTYPE_t, ndim=2] DX, # <<<<<<<<<<<<<<
* int nSectors,
* int nZones,
*/
/* Python wrapper */
/*
 * Cython-generated Python wrapper for the DX.pyx function `cython_DX_n`
 * (declared at DX.pyx:82 with 7 parameters: DX, nSectors, nZones, beta,
 * lamda, D_n, Pr_n).
 *
 * NOTE(review): this file is machine-generated by Cython. Fix issues in
 * DX.pyx and regenerate; do not hand-patch this C code.
 *
 * The wrapper unpacks the Python-level arguments from the positional tuple
 * and/or keyword dict, converts the scalars to C types, type-checks the
 * ndarray arguments against numpy.ndarray, and delegates to the C-level
 * implementation __pyx_pf_2DX_2cython_DX_n. Returns the implementation's
 * result, or NULL with a Python exception set on bad arguments.
 */
static PyObject *__pyx_pw_2DX_3cython_DX_n(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
/* Method-table entry exposing the wrapper to Python as "cython_DX_n";
 * METH_VARARGS|METH_KEYWORDS means it accepts both positional and keyword
 * arguments. */
static PyMethodDef __pyx_mdef_2DX_3cython_DX_n = {"cython_DX_n", (PyCFunction)__pyx_pw_2DX_3cython_DX_n, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_2DX_3cython_DX_n(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* C-level slots for the unpacked, converted arguments. Pointers start at 0
 * so the error paths can distinguish "never assigned". */
PyArrayObject *__pyx_v_DX = 0;
CYTHON_UNUSED int __pyx_v_nSectors;
int __pyx_v_nZones;
__pyx_t_2DX_DTYPE_t __pyx_v_beta;
__pyx_t_2DX_DTYPE_t __pyx_v_lamda;
PyArrayObject *__pyx_v_D_n = 0;
PyArrayObject *__pyx_v_Pr_n = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cython_DX_n (wrapper)", 0);
{
/* Interned keyword names in declaration order, 0-terminated; used both for
 * keyword lookup below and by __Pyx_ParseOptionalKeywords. */
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_DX,&__pyx_n_s_nSectors,&__pyx_n_s_nZones,&__pyx_n_s_beta,&__pyx_n_s_lamda,&__pyx_n_s_D_n,&__pyx_n_s_Pr_n,0};
PyObject* values[7] = {0,0,0,0,0,0,0};
/* Slow path: keyword dict present. First harvest positionals (the switch
 * falls through so `case N` fills slots N-1..0), then fill the remaining
 * slots from keywords. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
CYTHON_FALLTHROUGH;
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
CYTHON_FALLTHROUGH;
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
/* More than 7 positionals: arity error. */
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Starting from the first slot NOT covered by a positional, each required
 * argument must be supplied as a keyword; a missing one raises a
 * "takes exactly 7 arguments" TypeError via __Pyx_RaiseArgtupleInvalid. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_DX)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nSectors)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, 1); __PYX_ERR(0, 82, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_nZones)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, 2); __PYX_ERR(0, 82, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_beta)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, 3); __PYX_ERR(0, 82, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 4:
if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_lamda)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, 4); __PYX_ERR(0, 82, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 5:
if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_D_n)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, 5); __PYX_ERR(0, 82, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 6:
if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_Pr_n)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, 6); __PYX_ERR(0, 82, __pyx_L3_error)
}
}
/* Any keywords left over are either duplicates of positionals or unknown
 * names; __Pyx_ParseOptionalKeywords raises the matching TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "cython_DX_n") < 0)) __PYX_ERR(0, 82, __pyx_L3_error)
}
/* Fast path: no keywords, so the tuple must contain exactly 7 positionals. */
} else if (PyTuple_GET_SIZE(__pyx_args) != 7) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
}
/* Convert scalar arguments to C types; conversion failures (TypeError /
 * OverflowError) are detected via the -1 sentinel plus PyErr_Occurred().
 * The __PYX_ERR line numbers (83-86) point at the parameter declarations
 * in DX.pyx for the traceback. */
__pyx_v_DX = ((PyArrayObject *)values[0]);
__pyx_v_nSectors = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_nSectors == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 83, __pyx_L3_error)
__pyx_v_nZones = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_nZones == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L3_error)
__pyx_v_beta = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_beta == ((npy_double)-1)) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L3_error)
__pyx_v_lamda = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_lamda == ((npy_double)-1)) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L3_error)
__pyx_v_D_n = ((PyArrayObject *)values[5]);
__pyx_v_Pr_n = ((PyArrayObject *)values[6]);
}
goto __pyx_L4_argument_unpacking_done;
/* Arity error: wrong number of positional arguments. */
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cython_DX_n", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 82, __pyx_L3_error)
/* Unpacking failed: record the traceback frame and return NULL with the
 * exception already set. */
__pyx_L3_error:;
__Pyx_AddTraceback("DX.cython_DX_n", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Buffer arguments must be numpy.ndarray instances (None not accepted,
 * per the ndim-typed declarations at DX.pyx:82/87/88). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_DX), __pyx_ptype_5numpy_ndarray, 1, "DX", 0))) __PYX_ERR(0, 82, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_D_n), __pyx_ptype_5numpy_ndarray, 1, "D_n", 0))) __PYX_ERR(0, 87, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_Pr_n), __pyx_ptype_5numpy_ndarray, 1, "Pr_n", 0))) __PYX_ERR(0, 88, __pyx_L1_error)
/* Delegate to the C-level implementation with the fully converted args. */
__pyx_r = __pyx_pf_2DX_2cython_DX_n(__pyx_self, __pyx_v_DX, __pyx_v_nSectors, __pyx_v_nZones, __pyx_v_beta, __pyx_v_lamda, __pyx_v_D_n, __pyx_v_Pr_n);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_2DX_2cython_DX_n(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_DX, CYTHON_UNUSED int __pyx_v_nSectors, int __pyx_v_nZones, __pyx_t_2DX_DTYPE_t __pyx_v_beta, __pyx_t_2DX_DTYPE_t __pyx_v_lamda, PyArrayObject *__pyx_v_D_n, PyArrayObject *__pyx_v_Pr_n) {
unsigned int __pyx_v_i;
unsigned int __pyx_v_j;
unsigned int __pyx_v_k;
__pyx_t_2DX_DTYPE_t __pyx_v_sum_i;
__Pyx_LocalBuf_ND __pyx_pybuffernd_DX;
__Pyx_Buffer __pyx_pybuffer_DX;
__Pyx_LocalBuf_ND __pyx_pybuffernd_D_n;
__Pyx_Buffer __pyx_pybuffer_D_n;
__Pyx_LocalBuf_ND __pyx_pybuffernd_Pr_n;
__Pyx_Buffer __pyx_pybuffer_Pr_n;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
unsigned int __pyx_t_7;
unsigned int __pyx_t_8;
int __pyx_t_9;
unsigned int __pyx_t_10;
int __pyx_t_11;
unsigned int __pyx_t_12;
size_t __pyx_t_13;
size_t __pyx_t_14;
size_t __pyx_t_15;
size_t __pyx_t_16;
size_t __pyx_t_17;
size_t __pyx_t_18;
size_t __pyx_t_19;
size_t __pyx_t_20;
size_t __pyx_t_21;
size_t __pyx_t_22;
size_t __pyx_t_23;
size_t __pyx_t_24;
__Pyx_RefNannySetupContext("cython_DX_n", 0);
__pyx_pybuffer_DX.pybuffer.buf = NULL;
__pyx_pybuffer_DX.refcount = 0;
__pyx_pybuffernd_DX.data = NULL;
__pyx_pybuffernd_DX.rcbuffer = &__pyx_pybuffer_DX;
__pyx_pybuffer_D_n.pybuffer.buf = NULL;
__pyx_pybuffer_D_n.refcount = 0;
__pyx_pybuffernd_D_n.data = NULL;
__pyx_pybuffernd_D_n.rcbuffer = &__pyx_pybuffer_D_n;
__pyx_pybuffer_Pr_n.pybuffer.buf = NULL;
__pyx_pybuffer_Pr_n.refcount = 0;
__pyx_pybuffernd_Pr_n.data = NULL;
__pyx_pybuffernd_Pr_n.rcbuffer = &__pyx_pybuffer_Pr_n;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_DX.rcbuffer->pybuffer, (PyObject*)__pyx_v_DX, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 82, __pyx_L1_error)
}
__pyx_pybuffernd_DX.diminfo[0].strides = __pyx_pybuffernd_DX.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_DX.diminfo[0].shape = __pyx_pybuffernd_DX.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_DX.diminfo[1].strides = __pyx_pybuffernd_DX.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_DX.diminfo[1].shape = __pyx_pybuffernd_DX.rcbuffer->pybuffer.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D_n.rcbuffer->pybuffer, (PyObject*)__pyx_v_D_n, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) __PYX_ERR(0, 82, __pyx_L1_error)
}
__pyx_pybuffernd_D_n.diminfo[0].strides = __pyx_pybuffernd_D_n.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D_n.diminfo[0].shape = __pyx_pybuffernd_D_n.rcbuffer->pybuffer.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_Pr_n.rcbuffer->pybuffer, (PyObject*)__pyx_v_Pr_n, &__Pyx_TypeInfo_nn___pyx_t_2DX_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 82, __pyx_L1_error)
}
__pyx_pybuffernd_Pr_n.diminfo[0].strides = __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_Pr_n.diminfo[0].shape = __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_Pr_n.diminfo[1].strides = __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_Pr_n.diminfo[1].shape = __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.shape[1];
/* "DX.pyx":90
* c_np.ndarray[DTYPE_t, ndim=2] Pr_n):
*
* assert DX.dtype == DTYPE and D_n.dtype ==DTYPE and Pr_n.dtype == DTYPE # <<<<<<<<<<<<<<
*
* cdef unsigned int i,j,k #zero_entries
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_DX), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_D_n), __pyx_n_s_dtype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyObject_RichCompare(__pyx_t_4, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (__pyx_t_5) {
} else {
__pyx_t_1 = __pyx_t_5;
goto __pyx_L3_bool_binop_done;
}
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_Pr_n), __pyx_n_s_dtype); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_DTYPE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 90, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_1 = __pyx_t_5;
__pyx_L3_bool_binop_done:;
if (unlikely(!__pyx_t_1)) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(0, 90, __pyx_L1_error)
}
}
#endif
/* "DX.pyx":94
* cdef unsigned int i,j,k #zero_entries
* cdef DTYPE_t sum_i
* sum_i = 0.0 # <<<<<<<<<<<<<<
*
* for j in prange(nZones,nogil=True):
*/
__pyx_v_sum_i = 0.0;
/* "DX.pyx":96
* sum_i = 0.0
*
* for j in prange(nZones,nogil=True): # <<<<<<<<<<<<<<
* for k in xrange(nZones):
* sum_i = 0
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_6 = __pyx_v_nZones;
if (1 == 0) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_8 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_8 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_1, __pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_9)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_i) firstprivate(__pyx_v_j) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_sum_i)
#endif /* _OPENMP */
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){
{
__pyx_v_j = (unsigned int)(0 + 1 * __pyx_t_7);
/* Initialize private variables to invalid values */
__pyx_v_i = ((unsigned int)0xbad0bad0);
__pyx_v_k = ((unsigned int)0xbad0bad0);
__pyx_v_sum_i = ((__pyx_t_2DX_DTYPE_t)__PYX_NAN());
/* "DX.pyx":97
*
* for j in prange(nZones,nogil=True):
* for k in xrange(nZones): # <<<<<<<<<<<<<<
* sum_i = 0
* for i in xrange(nZones):
*/
__pyx_t_9 = __pyx_v_nZones;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_k = __pyx_t_10;
/* "DX.pyx":98
* for j in prange(nZones,nogil=True):
* for k in xrange(nZones):
* sum_i = 0 # <<<<<<<<<<<<<<
* for i in xrange(nZones):
* if k==j:
*/
__pyx_v_sum_i = 0.0;
/* "DX.pyx":99
* for k in xrange(nZones):
* sum_i = 0
* for i in xrange(nZones): # <<<<<<<<<<<<<<
* if k==j:
* sum_i = sum_i + (-lamda*beta*(Pr_n[i,j]-Pr_n[i,j]**2))*D_n[i]
*/
__pyx_t_11 = __pyx_v_nZones;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_i = __pyx_t_12;
/* "DX.pyx":100
* sum_i = 0
* for i in xrange(nZones):
* if k==j: # <<<<<<<<<<<<<<
* sum_i = sum_i + (-lamda*beta*(Pr_n[i,j]-Pr_n[i,j]**2))*D_n[i]
* else:
*/
__pyx_t_1 = ((__pyx_v_k == __pyx_v_j) != 0);
if (__pyx_t_1) {
/* "DX.pyx":101
* for i in xrange(nZones):
* if k==j:
* sum_i = sum_i + (-lamda*beta*(Pr_n[i,j]-Pr_n[i,j]**2))*D_n[i] # <<<<<<<<<<<<<<
* else:
* sum_i = sum_i +(lamda*beta*Pr_n[i,j]*Pr_n[i,k])*D_n[i]
*/
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_i;
__pyx_t_16 = __pyx_v_j;
__pyx_t_17 = __pyx_v_i;
__pyx_v_sum_i = (__pyx_v_sum_i + ((((-__pyx_v_lamda) * __pyx_v_beta) * ((*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_Pr_n.diminfo[0].strides, __pyx_t_14, __pyx_pybuffernd_Pr_n.diminfo[1].strides)) - pow((*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_Pr_n.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_Pr_n.diminfo[1].strides)), 2.0))) * (*__Pyx_BufPtrStrided1d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_D_n.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_D_n.diminfo[0].strides))));
/* "DX.pyx":100
* sum_i = 0
* for i in xrange(nZones):
* if k==j: # <<<<<<<<<<<<<<
* sum_i = sum_i + (-lamda*beta*(Pr_n[i,j]-Pr_n[i,j]**2))*D_n[i]
* else:
*/
goto __pyx_L17;
}
/* "DX.pyx":103
* sum_i = sum_i + (-lamda*beta*(Pr_n[i,j]-Pr_n[i,j]**2))*D_n[i]
* else:
* sum_i = sum_i +(lamda*beta*Pr_n[i,j]*Pr_n[i,k])*D_n[i] # <<<<<<<<<<<<<<
* DX[j,k] = sum_i
* return DX
*/
/*else*/ {
__pyx_t_18 = __pyx_v_i;
__pyx_t_19 = __pyx_v_j;
__pyx_t_20 = __pyx_v_i;
__pyx_t_21 = __pyx_v_k;
__pyx_t_22 = __pyx_v_i;
__pyx_v_sum_i = (__pyx_v_sum_i + ((((__pyx_v_lamda * __pyx_v_beta) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.buf, __pyx_t_18, __pyx_pybuffernd_Pr_n.diminfo[0].strides, __pyx_t_19, __pyx_pybuffernd_Pr_n.diminfo[1].strides))) * (*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_Pr_n.rcbuffer->pybuffer.buf, __pyx_t_20, __pyx_pybuffernd_Pr_n.diminfo[0].strides, __pyx_t_21, __pyx_pybuffernd_Pr_n.diminfo[1].strides))) * (*__Pyx_BufPtrStrided1d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_D_n.rcbuffer->pybuffer.buf, __pyx_t_22, __pyx_pybuffernd_D_n.diminfo[0].strides))));
}
__pyx_L17:;
}
/* "DX.pyx":104
* else:
* sum_i = sum_i +(lamda*beta*Pr_n[i,j]*Pr_n[i,k])*D_n[i]
* DX[j,k] = sum_i # <<<<<<<<<<<<<<
* return DX
*
*/
__pyx_t_23 = __pyx_v_j;
__pyx_t_24 = __pyx_v_k;
*__Pyx_BufPtrStrided2d(__pyx_t_2DX_DTYPE_t *, __pyx_pybuffernd_DX.rcbuffer->pybuffer.buf, __pyx_t_23, __pyx_pybuffernd_DX.diminfo[0].strides, __pyx_t_24, __pyx_pybuffernd_DX.diminfo[1].strides) = __pyx_v_sum_i;
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "DX.pyx":96
* sum_i = 0.0
*
* for j in prange(nZones,nogil=True): # <<<<<<<<<<<<<<
* for k in xrange(nZones):
* sum_i = 0
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L8;
}
__pyx_L8:;
}
}
/* "DX.pyx":105
* sum_i = sum_i +(lamda*beta*Pr_n[i,j]*Pr_n[i,k])*D_n[i]
* DX[j,k] = sum_i
* return DX # <<<<<<<<<<<<<<
*
* # @cython.boundscheck(False) # turn of bounds-checking for entire function
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_DX));
__pyx_r = ((PyObject *)__pyx_v_DX);
goto __pyx_L0;
/* "DX.pyx":82
*
* @cython.boundscheck(False) # turn of bounds-checking for entire function
* def cython_DX_n(c_np.ndarray[DTYPE_t, ndim=2] DX, # <<<<<<<<<<<<<<
* int nSectors,
* int nZones,
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_DX.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D_n.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Pr_n.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("DX.cython_DX_n", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_DX.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D_n.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_Pr_n.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for numpy.ndarray.__getbuffer__ (the
 * buffer-protocol bf_getbuffer slot). It only adapts the generic CPython
 * signature (PyObject*) to the typed implementation function by casting
 * `self` to PyArrayObject*, and forwards `info`/`flags` unchanged.
 * Returns 0 on success, -1 on error (per the PyBufferProcs contract),
 * as produced by the delegated implementation. */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
/* RefNanny bookkeeping: Setup/Finish must bracket the call exactly once;
 * they are no-ops unless Cython's reference-count debugging is enabled. */
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
/* Delegate to the typed impl defined later in this file; the casts are safe
 * because this slot is only installed on numpy.ndarray instances. */
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
goto __pyx_L4;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
/*else*/ {
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 218, __pyx_L1_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 222, __pyx_L1_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L11;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef int offset
*/
__pyx_v_f = NULL;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":250
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
goto __pyx_L14;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
/*else*/ {
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 259, __pyx_L1_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = ((char *)"B");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = ((char *)"h");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = ((char *)"H");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = ((char *)"i");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = ((char *)"I");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = ((char *)"l");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = ((char *)"L");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = ((char *)"q");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = ((char *)"Q");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = ((char *)"f");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = ((char *)"d");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = ((char *)"g");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = ((char *)"Zf");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = ((char *)"Zd");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = ((char *)"Zg");
break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = ((char *)"O");
break;
default:
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 278, __pyx_L1_error)
break;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)malloc(0xFF));
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error)
__pyx_v_f = __pyx_t_7;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper */
/* Cython-generated Python-level entry point for ndarray.__releasebuffer__.
 * It only casts the generic (PyObject *, Py_buffer *) slot arguments and
 * forwards to the C implementation __pyx_pf_5numpy_7ndarray_2__releasebuffer__.
 * Returns void: the buffer-release slot has no way to report errors. */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
/* Delegate to the implementation; no reference counting of the arguments is
 * needed here beyond the RefNanny bookkeeping. */
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of ndarray.__releasebuffer__ (generated from numpy/__init__.pxd).
 * Releases the heap storage that __getbuffer__ allocated into the Py_buffer:
 *   - info->format, but only when the dtype is structured (PyArray_HASFIELDS),
 *     since that is the branch of __getbuffer__ that malloc()s a format string;
 *   - info->strides, but only when sizeof(npy_intp) != sizeof(Py_ssize_t),
 *     i.e. when __getbuffer__ presumably had to allocate converted
 *     strides/shape instead of aliasing the array's own (per the .pxd comment,
 *     info.shape was stored in the same block, so a single free suffices). */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides)
*/
/* Free the malloc'ed format string for structured dtypes. */
free(__pyx_v_info->format);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
/* Compile-time condition; the compiler folds this branch away entirely on
 * platforms where npy_intp and Py_ssize_t have the same width. */
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
free(__pyx_v_info->strides);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* Generated inline helper: wraps PyArray_MultiIterNew(1, a), building a numpy
 * multi-iterator (broadcast) object over a single input. Returns a new
 * reference on success; on failure records a traceback and returns 0 (NULL). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* Generated inline helper: wraps PyArray_MultiIterNew(2, a, b), building a
 * numpy multi-iterator (broadcast) object over two inputs. Returns a new
 * reference on success; on failure records a traceback and returns 0 (NULL). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* Generated inline helper: wraps PyArray_MultiIterNew(3, a, b, c), building a
 * numpy multi-iterator (broadcast) object over three inputs. Returns a new
 * reference on success; on failure records a traceback and returns 0 (NULL). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* Generated inline helper: wraps PyArray_MultiIterNew(4, a, b, c, d), building
 * a numpy multi-iterator (broadcast) object over four inputs. Returns a new
 * reference on success; on failure records a traceback and returns 0 (NULL). */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* Generated inline helper: wraps PyArray_MultiIterNew(5, a, b, c, d, e),
 * building a numpy multi-iterator (broadcast) object over five inputs. Returns
 * a new reference on success; on failure records a traceback and returns 0. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 794, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 795, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 796, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 799, __pyx_L1_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_6) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 803, __pyx_L1_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 823, __pyx_L1_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 844, __pyx_L1_error)
}
__pyx_L15:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* Cython-generated body of `cdef inline void set_array_base(ndarray arr, object base)`
 * from numpy/__init__.pxd:966.
 * Installs `base` as the owning base object of `arr`: if `base` is None the
 * base pointer becomes NULL, otherwise `base` is INCREF'd and stored.  The
 * array's previous base (if any) is released with Py_XDECREF.
 * NOTE(review): machine-generated code — change the .pxd and regenerate with
 * Cython rather than editing the logic here by hand. */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
 * cdef inline void set_array_base(ndarray arr, object base):
 * cdef PyObject* baseptr
 * if base is None: # <<<<<<<<<<<<<<
 * baseptr = NULL
 * else:
 */
/* `base is None` — identity comparison against the Py_None singleton. */
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
 * cdef PyObject* baseptr
 * if base is None:
 * baseptr = NULL # <<<<<<<<<<<<<<
 * else:
 * Py_INCREF(base) # important to do this before decref below!
 */
__pyx_v_baseptr = NULL;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
 * cdef inline void set_array_base(ndarray arr, object base):
 * cdef PyObject* baseptr
 * if base is None: # <<<<<<<<<<<<<<
 * baseptr = NULL
 * else:
 */
goto __pyx_L3;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
 * baseptr = NULL
 * else:
 * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
 * baseptr = <PyObject*>base
 * Py_XDECREF(arr.base)
 */
/*else*/ {
/* INCREF the new base BEFORE the Py_XDECREF of the old base further down:
 * per the .pxd comment this ordering is deliberate (it keeps `base` alive
 * even if it aliases the object currently stored in arr->base). */
Py_INCREF(__pyx_v_base);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
 * else:
 * Py_INCREF(base) # important to do this before decref below!
 * baseptr = <PyObject*>base # <<<<<<<<<<<<<<
 * Py_XDECREF(arr.base)
 * arr.base = baseptr
 */
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
 * Py_INCREF(base) # important to do this before decref below!
 * baseptr = <PyObject*>base
 * Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
 * arr.base = baseptr
 *
 */
/* Release the previous base (may be NULL — hence XDECREF). */
Py_XDECREF(__pyx_v_arr->base);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
 * baseptr = <PyObject*>base
 * Py_XDECREF(arr.base)
 * arr.base = baseptr # <<<<<<<<<<<<<<
 *
 * cdef inline object get_array_base(ndarray arr):
 */
/* arr->base now owns the reference taken by Py_INCREF above (or is NULL). */
__pyx_v_arr->base = __pyx_v_baseptr;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
 *
 *
 * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
 * cdef PyObject* baseptr
 * if base is None:
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* Cython-generated body of `cdef inline object get_array_base(ndarray arr)`
 * from numpy/__init__.pxd:976.
 * Returns a NEW reference: Py_None when arr->base is NULL, otherwise the
 * base object with its refcount incremented (mirror of set_array_base above).
 * NOTE(review): machine-generated code — regenerate via Cython, do not edit
 * the logic by hand. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
 *
 * cdef inline object get_array_base(ndarray arr):
 * if arr.base is NULL: # <<<<<<<<<<<<<<
 * return None
 * else:
 */
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
 * cdef inline object get_array_base(ndarray arr):
 * if arr.base is NULL:
 * return None # <<<<<<<<<<<<<<
 * else:
 * return <object>arr.base
 */
/* No base: hand back an owned reference to None. */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
 *
 * cdef inline object get_array_base(ndarray arr):
 * if arr.base is NULL: # <<<<<<<<<<<<<<
 * return None
 * else:
 */
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
 * return None
 * else:
 * return <object>arr.base # <<<<<<<<<<<<<<
 *
 *
 */
/*else*/ {
/* arr->base holds a borrowed view from the caller's perspective; INCREF
 * so the returned object is an owned (new) reference. */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
goto __pyx_L0;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
 * arr.base = baseptr
 *
 * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
 * if arr.base is NULL:
 * return None
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":985
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
/* Cython-generated body of `cdef inline int import_array() except -1`
 * from numpy/__init__.pxd:985.
 * Wraps NumPy's _import_array(): on failure any pending exception is
 * replaced by ImportError("numpy.core.multiarray failed to import").
 * Returns 0 on success, -1 with a Python exception set on failure.
 * NOTE(review): machine-generated code — the ExceptionSave/ExceptionReset
 * pairing implements Cython's try/except semantics and is order-critical;
 * regenerate rather than hand-edit. */
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":986
 * # Cython code.
 * cdef inline int import_array() except -1:
 * try: # <<<<<<<<<<<<<<
 * _import_array()
 * except Exception:
 */
{
/* Save the currently-handled exception state (t_1..t_3) so the except
 * clause can restore it — Cython's standard try-block prologue. */
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":987
 * cdef inline int import_array() except -1:
 * try:
 * _import_array() # <<<<<<<<<<<<<<
 * except Exception:
 * raise ImportError("numpy.core.multiarray failed to import")
 */
/* NumPy C-API initializer; returns -1 with an exception set on failure. */
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 987, __pyx_L3_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":986
 * # Cython code.
 * cdef inline int import_array() except -1:
 * try: # <<<<<<<<<<<<<<
 * _import_array()
 * except Exception:
 */
}
/* try succeeded: drop the saved exception state and skip the handler. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
__Pyx_PyThreadState_assign
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":988
 * try:
 * _import_array()
 * except Exception: # <<<<<<<<<<<<<<
 * raise ImportError("numpy.core.multiarray failed to import")
 *
 */
/* `except Exception:` — only handle exceptions matching PyExc_Exception. */
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 988, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":989
 * _import_array()
 * except Exception:
 * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
 *
 * cdef inline int import_umath() except -1:
 */
/* Re-raise as ImportError (args pre-built in __pyx_tuple__7). */
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 989, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 989, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":986
 * # Cython code.
 * cdef inline int import_array() except -1:
 * try: # <<<<<<<<<<<<<<
 * _import_array()
 * except Exception:
 */
/* Propagation path: restore the outer exception state, then fail. */
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":985
 * # Versions of the import_* functions which are more suitable for
 * # Cython code.
 * cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
 * try:
 * _import_array()
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":991
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* Cython-generated body of `cdef inline int import_umath() except -1`
 * from numpy/__init__.pxd:991.
 * Wraps NumPy's _import_umath(): on failure any pending exception is
 * replaced by ImportError("numpy.core.umath failed to import").
 * Returns 0 on success, -1 with a Python exception set on failure.
 * Structurally identical to import_array() above — same generated
 * try/except scaffolding; regenerate rather than hand-edit. */
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":992
 *
 * cdef inline int import_umath() except -1:
 * try: # <<<<<<<<<<<<<<
 * _import_umath()
 * except Exception:
 */
{
/* Save the currently-handled exception state (t_1..t_3) for the handler. */
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":993
 * cdef inline int import_umath() except -1:
 * try:
 * _import_umath() # <<<<<<<<<<<<<<
 * except Exception:
 * raise ImportError("numpy.core.umath failed to import")
 */
/* NumPy ufunc C-API initializer; -1 with an exception set on failure. */
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 993, __pyx_L3_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":992
 *
 * cdef inline int import_umath() except -1:
 * try: # <<<<<<<<<<<<<<
 * _import_umath()
 * except Exception:
 */
}
/* try succeeded: release the saved exception state and skip the handler. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
__Pyx_PyThreadState_assign
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":994
 * try:
 * _import_umath()
 * except Exception: # <<<<<<<<<<<<<<
 * raise ImportError("numpy.core.umath failed to import")
 *
 */
/* `except Exception:` — only handle exceptions matching PyExc_Exception. */
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 994, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":995
 * _import_umath()
 * except Exception:
 * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
 *
 * cdef inline int import_ufunc() except -1:
 */
/* Re-raise as ImportError (args pre-built in __pyx_tuple__8). */
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 995, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 995, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":992
 *
 * cdef inline int import_umath() except -1:
 * try: # <<<<<<<<<<<<<<
 * _import_umath()
 * except Exception:
 */
/* Propagation path: restore the outer exception state, then fail. */
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":991
 * raise ImportError("numpy.core.multiarray failed to import")
 *
 * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
 * try:
 * _import_umath()
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":997
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* Generated from numpy/__init__.pxd: cdef inline int import_ufunc() except -1.
 * Runs _import_umath() inside a try/except; any Python exception is replaced
 * by ImportError("numpy.core.umath failed to import").
 * Returns 0 on success, -1 with a Python exception set on failure. */
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":998
 *
 * cdef inline int import_ufunc() except -1:
 *     try:             # <<<<<<<<<<<<<<
 *         _import_umath()
 *     except Exception:
 */
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
/* Save the current exception state (t_1..t_3) so the except clause can
 * restore it; this is Cython's standard try/except prologue. */
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":999
 * cdef inline int import_ufunc() except -1:
 *     try:
 *         _import_umath()             # <<<<<<<<<<<<<<
 *     except Exception:
 *         raise ImportError("numpy.core.umath failed to import")
 */
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 999, __pyx_L3_error)
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":998
 *
 * cdef inline int import_ufunc() except -1:
 *     try:             # <<<<<<<<<<<<<<
 *         _import_umath()
 *     except Exception:
 */
}
/* try body succeeded: drop the saved exception state. */
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
/* Landing pad for any error raised inside the try body. */
__pyx_L3_error:;
__Pyx_PyThreadState_assign
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1000
 *     try:
 *         _import_umath()
 *     except Exception:             # <<<<<<<<<<<<<<
 *         raise ImportError("numpy.core.umath failed to import")
 */
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1000, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1001
 *         _import_umath()
 *     except Exception:
 *         raise ImportError("numpy.core.umath failed to import")             # <<<<<<<<<<<<<<
 */
/* __pyx_tuple__9 holds the pre-packed message argument tuple (see
 * __Pyx_InitCachedConstants). */
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1001, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1001, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":998
 *
 * cdef inline int import_ufunc() except -1:
 *     try:             # <<<<<<<<<<<<<<
 *         _import_umath()
 *     except Exception:
 */
/* Restore the previously saved exception state before propagating. */
__Pyx_PyThreadState_assign
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":997
 * raise ImportError("numpy.core.umath failed to import")
 *
 * cdef inline int import_ufunc() except -1:             # <<<<<<<<<<<<<<
 *     try:
 *         _import_umath()
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Module method table. It contains only the sentinel entry because this
 * module's functions (cython_DX_h, cython_DX_n) are created explicitly via
 * PyCFunction_NewEx during module init and inserted into the module dict. */
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
/* Python 3 module definition for "DX" (Python 2 uses Py_InitModule4 instead).
 * m_size == -1: module keeps state in globals and does not support
 * sub-interpreters or multi-phase init. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
PyModuleDef_HEAD_INIT,
#endif
"DX",
0, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
/* Interned-string table: every identifier and string literal used by the
 * module. __Pyx_InitStrings() turns each entry into a Python string object
 * stored in the corresponding __pyx_n_s_* / __pyx_kp_* global. Entry fields:
 * {target, c-string, byte length, encoding-is-unicode, is-unicode-obj,
 *  is-str, intern}. Terminated by an all-zero sentinel. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1},
{&__pyx_n_s_DX, __pyx_k_DX, sizeof(__pyx_k_DX), 0, 0, 1, 1},
{&__pyx_n_s_DX_h, __pyx_k_DX_h, sizeof(__pyx_k_DX_h), 0, 0, 1, 1},
{&__pyx_kp_s_DX_pyx, __pyx_k_DX_pyx, sizeof(__pyx_k_DX_pyx), 0, 0, 1, 0},
{&__pyx_n_s_D_n, __pyx_k_D_n, sizeof(__pyx_k_D_n), 0, 0, 1, 1},
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_n_s_L, __pyx_k_L, sizeof(__pyx_k_L), 0, 0, 1, 1},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_Pr, __pyx_k_Pr, sizeof(__pyx_k_Pr), 0, 0, 1, 1},
{&__pyx_n_s_Pr_n, __pyx_k_Pr_n, sizeof(__pyx_k_Pr_n), 0, 0, 1, 1},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s_U, __pyx_k_U, sizeof(__pyx_k_U), 0, 0, 1, 1},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_X_0, __pyx_k_X_0, sizeof(__pyx_k_X_0), 0, 0, 1, 1},
{&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1},
{&__pyx_n_s_aux1, __pyx_k_aux1, sizeof(__pyx_k_aux1), 0, 0, 1, 1},
{&__pyx_n_s_aux2, __pyx_k_aux2, sizeof(__pyx_k_aux2), 0, 0, 1, 1},
{&__pyx_n_s_beta, __pyx_k_beta, sizeof(__pyx_k_beta), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_cython_DX_h, __pyx_k_cython_DX_h, sizeof(__pyx_k_cython_DX_h), 0, 0, 1, 1},
{&__pyx_n_s_cython_DX_n, __pyx_k_cython_DX_n, sizeof(__pyx_k_cython_DX_n), 0, 0, 1, 1},
{&__pyx_n_s_delta, __pyx_k_delta, sizeof(__pyx_k_delta), 0, 0, 1, 1},
{&__pyx_n_s_delta_mn, __pyx_k_delta_mn, sizeof(__pyx_k_delta_mn), 0, 0, 1, 1},
{&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1},
{&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1},
{&__pyx_n_s_gap, __pyx_k_gap, sizeof(__pyx_k_gap), 0, 0, 1, 1},
{&__pyx_n_s_genflux_sectors, __pyx_k_genflux_sectors, sizeof(__pyx_k_genflux_sectors), 0, 0, 1, 1},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1},
{&__pyx_n_s_lamda, __pyx_k_lamda, sizeof(__pyx_k_lamda), 0, 0, 1, 1},
{&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
{&__pyx_n_s_nSectors, __pyx_k_nSectors, sizeof(__pyx_k_nSectors), 0, 0, 1, 1},
{&__pyx_n_s_nZones, __pyx_k_nZones, sizeof(__pyx_k_nZones), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_ni, __pyx_k_ni, sizeof(__pyx_k_ni), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_sum_i, __pyx_k_sum_i, sizeof(__pyx_k_sum_i), 0, 0, 1, 1},
{&__pyx_n_s_sum_mi, __pyx_k_sum_mi, sizeof(__pyx_k_sum_mi), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Look up and cache the builtins this module uses (xrange/range, ValueError,
 * RuntimeError, ImportError). On Python 3 the name "xrange" is resolved to
 * the builtin "range". Returns 0 on success, -1 with an exception set. */
static int __Pyx_InitCachedBuiltins(void) {
#if PY_MAJOR_VERSION >= 3
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) __PYX_ERR(0, 50, __pyx_L1_error)
#else
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) __PYX_ERR(0, 50, __pyx_L1_error)
#endif
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 989, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
/* Pre-build constant objects used at runtime: one-element argument tuples
 * for the buffer-protocol error messages (__pyx_tuple_ .. __pyx_tuple__9),
 * plus the varnames tuples and code objects for the two module functions
 * cython_DX_h and cython_DX_n. Returns 0 on success, -1 with an exception
 * set on failure. */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
 *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
 *
 *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 */
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
 *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
 *
 *             info.buf = PyArray_DATA(self)
 */
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
 *                 if ((descr.byteorder == c'>' and little_endian) or
 *                     (descr.byteorder == c'<' and not little_endian)):
 *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
 *                 if   t == NPY_BYTE:        f = "b"
 *                 elif t == NPY_UBYTE:       f = "B"
 */
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
 *
 *         if (end - f) - <int>(new_offset - offset[0]) < 15:
 *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
 *
 *         if ((child.byteorder == c'>' and little_endian) or
 */
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
 *         if ((child.byteorder == c'>' and little_endian) or
 *             (child.byteorder == c'<' and not little_endian)):
 *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
 *             # One could encode it in the format string and have Cython
 *             # complain instead, BUT: < and > in format strings also imply
 */
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
 *             t = child.type_num
 *             if end - f < 5:
 *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
 *
 *             # Until ticket #99 is fixed, use integers to avoid warnings
 */
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":989
 *         _import_array()
 *     except Exception:
 *         raise ImportError("numpy.core.multiarray failed to import")             # <<<<<<<<<<<<<<
 *
 * cdef inline int import_umath() except -1:
 */
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":995
 *         _import_umath()
 *     except Exception:
 *         raise ImportError("numpy.core.umath failed to import")             # <<<<<<<<<<<<<<
 *
 * cdef inline int import_ufunc() except -1:
 */
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1001
 *         _import_umath()
 *     except Exception:
 *         raise ImportError("numpy.core.umath failed to import")             # <<<<<<<<<<<<<<
 */
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1001, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "DX.pyx":19
 *
 * @cython.boundscheck(False) # turn of bounds-checking for entire function
 * def cython_DX_h(c_np.ndarray[DTYPE_t, ndim=3] Pr,             # <<<<<<<<<<<<<<
 *                 c_np.ndarray[DTYPE_t, ndim=1] lamda,
 *                 c_np.ndarray[DTYPE_t, ndim=1] beta,
 */
/* Varnames tuple + code object for cython_DX_h (11 args, 23 locals). */
__pyx_tuple__10 = PyTuple_Pack(23, __pyx_n_s_Pr, __pyx_n_s_lamda, __pyx_n_s_beta, __pyx_n_s_U, __pyx_n_s_alpha, __pyx_n_s_gap, __pyx_n_s_delta, __pyx_n_s_X_0, __pyx_n_s_nSectors, __pyx_n_s_nZones, __pyx_n_s_genflux_sectors, __pyx_n_s_DX_h, __pyx_n_s_sum_mi, __pyx_n_s_aux1, __pyx_n_s_aux2, __pyx_n_s_delta_mn, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_m, __pyx_n_s_ni, __pyx_n_s_L, __pyx_n_s_n); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
__pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(11, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_DX_pyx, __pyx_n_s_cython_DX_h, 19, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 19, __pyx_L1_error)
/* "DX.pyx":82
 *
 * @cython.boundscheck(False) # turn of bounds-checking for entire function
 * def cython_DX_n(c_np.ndarray[DTYPE_t, ndim=2] DX,             # <<<<<<<<<<<<<<
 *                 int nSectors,
 *                 int nZones,
 */
/* Varnames tuple + code object for cython_DX_n (7 args, 11 locals). */
__pyx_tuple__12 = PyTuple_Pack(11, __pyx_n_s_DX, __pyx_n_s_nSectors, __pyx_n_s_nZones, __pyx_n_s_beta, __pyx_n_s_lamda, __pyx_n_s_D_n, __pyx_n_s_Pr_n, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_k, __pyx_n_s_sum_i); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
__pyx_codeobj__13 = (PyObject*)__Pyx_PyCode_New(7, 0, 11, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__12, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_DX_pyx, __pyx_n_s_cython_DX_n, 82, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__13)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* Initialize interpreter-level globals: ensure the GIL/threading machinery
 * is up and intern all strings from __pyx_string_tab.
 * Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
return 0;
__pyx_L1_error:;
return -1;
}
/* Module entry point: initDX (Python 2) / PyInit_DX (Python 3).
 * Creates the module object, initializes cached strings/builtins/constants,
 * imports the numpy C types used by the buffer protocol, and then runs the
 * module-level Python code of DX.pyx (import numpy, DTYPE = np.float,
 * definition of cython_DX_h and cython_DX_n). The ordering of these steps
 * matters: later steps use objects cached by earlier ones. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initDX(void); /*proto*/
PyMODINIT_FUNC initDX(void)
#else
PyMODINIT_FUNC PyInit_DX(void); /*proto*/
PyMODINIT_FUNC PyInit_DX(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_DX(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/* Shared empty singletons used by the cached code objects below. */
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("DX", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_DX) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
/* Register the module in sys.modules early so recursive imports during
 * execution of the module body see a (partially initialized) module. */
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "DX")) {
if (unlikely(PyDict_SetItemString(modules, "DX", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
/* Import the C-level numpy/CPython type objects the buffer code needs. */
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error)
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "DX.pyx":4
 * from cython.parallel cimport prange
 *
 * import numpy as np             # <<<<<<<<<<<<<<
 * cimport numpy as c_np
 *
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "DX.pyx":11
 *
 *
 * DTYPE = np.float             # <<<<<<<<<<<<<<
 * from libc.math cimport exp
 *
 */
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "DX.pyx":19
 *
 * @cython.boundscheck(False) # turn of bounds-checking for entire function
 * def cython_DX_h(c_np.ndarray[DTYPE_t, ndim=3] Pr,             # <<<<<<<<<<<<<<
 *                 c_np.ndarray[DTYPE_t, ndim=1] lamda,
 *                 c_np.ndarray[DTYPE_t, ndim=1] beta,
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_2DX_1cython_DX_h, NULL, __pyx_n_s_DX); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_cython_DX_h, __pyx_t_2) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "DX.pyx":82
 *
 * @cython.boundscheck(False) # turn of bounds-checking for entire function
 * def cython_DX_n(c_np.ndarray[DTYPE_t, ndim=2] DX,             # <<<<<<<<<<<<<<
 *                 int nSectors,
 *                 int nZones,
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_2DX_3cython_DX_n, NULL, __pyx_n_s_DX); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_cython_DX_n, __pyx_t_2) < 0) __PYX_ERR(0, 82, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "DX.pyx":1
 * from __future__ import division             # <<<<<<<<<<<<<<
 * from cython.parallel cimport prange
 *
 */
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "../../../../../../../Users/thomascapelle/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":997
 * raise ImportError("numpy.core.umath failed to import")
 *
 * cdef inline int import_ufunc() except -1:             # <<<<<<<<<<<<<<
 *     try:
 *         _import_umath()
 */
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init DX", 0, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init DX");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Import the refnanny debugging API: fetch module "modname", read its
 * "RefNannyAPI" attribute (a PyLong holding a pointer) and convert it back
 * to an API-struct pointer. Returns NULL on any failure (exception left
 * set or cleared by the caller). */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
/* XDECREF handles the partially-initialized cases (m or p still NULL). */
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* GetBuiltinName */
/* Look up `name` on the builtins module (__pyx_b). Returns a new reference,
 * or NULL with NameError set when the builtin does not exist. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
/* Raise a TypeError describing a wrong number of positional arguments.
 * `exact` selects the wording "exactly"; otherwise "at least"/"at most"
 * is chosen depending on whether too few or too many were supplied.
 * The reported expected count is num_min when under-supplied and num_max
 * otherwise, matching CPython's own message style. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    const int too_few = (num_found < num_min);
    const Py_ssize_t num_expected = too_few ? num_min : num_max;
    /* "exactly" overrides the at-least/at-most wording, but not the count. */
    const char *more_or_less =
        exact ? "exactly" : (too_few ? "at least" : "at most");
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
/* Raise a TypeError for a keyword argument that was also given positionally
 * (or given twice). kw_name is a str (py3) or bytes (py2) object. */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
/* Match the keyword dict `kwds` against the declared argument names.
 * argnames[] is the NULL-terminated list of all parameter names;
 * entries before first_kw_arg (= argnames + num_pos_args) were already
 * filled positionally. Recognized keywords are stored into values[];
 * unknown ones go into kwds2 when provided (a **kwargs dict), otherwise
 * they raise TypeError. Duplicates of positionally-given arguments raise
 * TypeError as well. Returns 0 on success, -1 with an exception set. */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
/* Fast path: interned strings compare by pointer identity first. */
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
/* Slow path: compare by value (identity check failed). */
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
/* Not a keyword-only match: check whether it duplicates a
 * positional argument already consumed. */
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
/* Unrecognized keyword: forward to **kwargs or fail. */
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
/* Raise a TypeError reporting that argument `name` had type obj's type
 * where `type` was expected. */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
}
/* Check that `obj` is acceptable for a typed argument.
 * none_allowed: accept Py_None; exact: require the exact type (no
 * subclasses; on py2, str/unicode both match PyBaseString_Type);
 * otherwise an isinstance-style check is used.
 * Returns 1 if acceptable, else 0 with TypeError set. */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(PyObject_TypeCheck(obj, type))) return 1;
}
__Pyx_RaiseArgumentTypeInvalid(name, obj, type);
return 0;
}
/* IsLittleEndian */
/* Runtime endianness probe: store a known 32-bit pattern and inspect its
 * lowest-addressed byte. Returns nonzero exactly on little-endian hosts,
 * where the least-significant byte (0x04) is stored first. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t whole;
        uint8_t bytes[4];
    } probe;
    probe.whole = 0x01020304;
    return probe.bytes[0] == 4;
}
/* BufferFormatCheck */
/* Initialize a buffer-format parsing context for dtype `type`, using the
 * caller-provided `stack` as the field-nesting stack. Defaults are native
 * ('@') packing and a pending repeat count of 1. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
/* Descend through wrapper struct types ('S') with a single field so the
 * head points at the first real scalar field. */
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
/* Parse a decimal repeat count at *ts, advancing *ts past the digits.
 * Returns the parsed non-negative count, or -1 (with *ts unchanged) when
 * the next character is not a digit — the caller decides whether that is
 * an error. Note: like the original, there is no overflow guard; format
 * strings are short enough in practice that counts fit in an int. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* BUG FIX: the loop condition was `*t < '9'`, which excluded the
         * digit '9' and truncated numbers containing it (e.g. "19" parsed
         * as 1, leaving '9' unconsumed). Use <= '9' so every decimal digit
         * is accumulated; upstream Cython applies the same fix. */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but a missing number is an error:
 * sets ValueError and returns -1 when the next character is not a digit. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
/* Set ValueError for a format character the parser does not recognize. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
/* Return a human-readable type name for a buffer/struct format character,
 * used when composing dtype-mismatch error messages. The floating-point
 * characters ('f', 'd', 'g') have a complex variant selected by
 * `is_complex`; every other character ignores that flag. Unknown
 * characters map to "unparseable format string". */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    if (ch == 'c') return "'char'";
    if (ch == 'b') return "'signed char'";
    if (ch == 'B') return "'unsigned char'";
    if (ch == 'h') return "'short'";
    if (ch == 'H') return "'unsigned short'";
    if (ch == 'i') return "'int'";
    if (ch == 'I') return "'unsigned int'";
    if (ch == 'l') return "'long'";
    if (ch == 'L') return "'unsigned long'";
    if (ch == 'q') return "'long long'";
    if (ch == 'Q') return "'unsigned long long'";
    if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
    if (ch == 'T') return "a struct";
    if (ch == 'O') return "Python object";
    if (ch == 'P') return "a pointer";
    if (ch == 's' || ch == 'p') return "a string";
    if (ch == 0) return "end";
    return "unparseable format string";
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* Native ('@', '^') size in bytes of a format character on this platform,
 * via sizeof; complex doubles the base size.  Returns 0 with a Python
 * error set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
  switch (ch) {
    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
/* Alignment-probe structs: in { char c; T x; } the compiler places x at
 * T's required alignment, so sizeof(struct) - sizeof(T) (used in
 * __Pyx_BufFmt_TypeCharToAlignment below) yields the alignment of T. */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement (in bytes) of a format character, derived
 * from the __Pyx_st_* probe structs above.  Returns 0 with a Python
 * error set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as
   the alignment probes above, but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing padding (in bytes) a struct needs after a member of the given
 * format character, derived from the __Pyx_pad_* probe structs.
 * Returns 0 with a Python error set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* Classify a format character into a type group: 'H' bytes/chars,
 * 'I' signed integers, 'U' unsigned integers, 'R' real floats,
 * 'C' complex floats, 'O' object, 'P' pointer.  Returns 0 with a
 * Python error set for unknown characters. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
  if (ch == 'c')
    return 'H';
  if (ch == 'b' || ch == 'h' || ch == 'i' ||
      ch == 'l' || ch == 'q' || ch == 's' || ch == 'p')
    return 'I';
  if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
    return 'U';
  if (ch == 'f' || ch == 'd' || ch == 'g')
    return (is_complex ? 'C' : 'R');
  if (ch == 'O' || ch == 'P')
    return ch;
  __Pyx_BufFmt_RaiseUnexpectedChar(ch);
  return 0;
}
/* Set a "Buffer dtype mismatch" ValueError describing what the dtype
 * expected next versus what the format string actually supplied
 * (ctx->enc_type / ctx->is_complex).  A NULL head means the whole
 * struct was already consumed ("expected end"). */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
    const char* expected;
    const char* quote;
    if (ctx->head == NULL) {
      expected = "end";
      quote = "";
    } else {
      expected = ctx->head->field->type->name;
      quote = "'";
    }
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected %s%s%s but got %s",
                 quote, expected, quote,
                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
  } else {
    /* Inside a nested struct: include the parent struct and field name. */
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_StructField* parent = (ctx->head - 1)->field;
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                 parent->type->name, field->name);
  }
}
/* Consume the pending run of identical format characters (ctx->enc_type
 * repeated ctx->enc_count times) and match it against the expected dtype
 * fields, advancing ctx->head/fmt_offset.  Returns 0 on success, -1 with
 * a Python error set on mismatch.  NOTE: the traversal below depends on
 * the exact order of head/field updates — modify with care. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
  char group;
  size_t size, offset, arraysize = 1;
  if (ctx->enc_type == 0) return 0;
  if (ctx->head->field->type->arraysize[0]) {
    int i, ndim = 0;
    /* 's'/'p' encode a 1-d char array whose length must match dim 0. */
    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
      ctx->is_valid_array = ctx->head->field->type->ndim == 1;
      ndim = 1;
      if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
        PyErr_Format(PyExc_ValueError,
                     "Expected a dimension of size %zu, got %zu",
                     ctx->head->field->type->arraysize[0], ctx->enc_count);
        return -1;
      }
    }
    if (!ctx->is_valid_array) {
      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                   ctx->head->field->type->ndim, ndim);
      return -1;
    }
    for (i = 0; i < ctx->head->field->type->ndim; i++) {
      arraysize *= ctx->head->field->type->arraysize[i];
    }
    ctx->is_valid_array = 0;
    ctx->enc_count = 1;
  }
  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
  do {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_TypeInfo* type = field->type;
    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
    } else {
      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
    }
    if (ctx->enc_packmode == '@') {
      /* Native packing: advance fmt_offset to the member's alignment and
       * remember the struct's overall alignment for trailing padding. */
      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
      size_t align_mod_offset;
      if (align_at == 0) return -1;
      align_mod_offset = ctx->fmt_offset % align_at;
      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
      if (ctx->struct_alignment == 0)
          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                 ctx->is_complex);
    }
    if (type->size != size || type->typegroup != group) {
      /* Complex expected as a struct of two floats: descend into it. */
      if (type->typegroup == 'C' && type->fields != NULL) {
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = parent_offset;
        continue;
      }
      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
      } else {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
      }
    }
    offset = ctx->head->parent_offset + field->offset;
    if (ctx->fmt_offset != offset) {
      PyErr_Format(PyExc_ValueError,
                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
      return -1;
    }
    ctx->fmt_offset += size;
    if (arraysize)
      ctx->fmt_offset += (arraysize - 1) * size;
    --ctx->enc_count;
    /* Advance to the next expected field, unwinding finished structs and
     * descending into packed-struct ('S') members. */
    while (1) {
      if (field == &ctx->root) {
        ctx->head = NULL;
        if (ctx->enc_count != 0) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
        }
        break;
      }
      ctx->head->field = ++field;
      if (field->type == NULL) {
        --ctx->head;
        field = ctx->head->field;
        continue;
      } else if (field->type->typegroup == 'S') {
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        if (field->type->fields->type == NULL) continue;
        field = field->type->fields;
        ++ctx->head;
        ctx->head->field = field;
        ctx->head->parent_offset = parent_offset;
        break;
      } else {
        break;
      }
    }
  } while (ctx->enc_count);
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  return 0;
}
/* Parse a "(d1,d2,...)" array-shape suffix in a buffer format string,
 * validating each dimension against the expected dtype's arraysize.
 * On entry *tsp points at '('; on success *tsp is advanced past ')'
 * and Py_None (borrowed) is returned; on error returns NULL with a
 * Python error set. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUG FIX: skip whitespace by advancing ts before continue;
             * a bare "continue" here left ts unchanged and spun forever
             * on any whitespace inside the parentheses. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                ++ts;
                continue;
            default: break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;
    return Py_None;
}
/* Walk a PEP-3118 buffer format string, validating it against the dtype
 * described by ctx.  Returns the position after the consumed portion
 * (past '}' for nested structs, at NUL for the top level), or NULL with
 * a Python error set on mismatch.  NOTE: the 'Z' and type-character
 * cases rely on deliberate switch fall-through. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;
  while (1) {
    switch(*ts) {
      case 0:
        /* End of string: flush the pending chunk and require that the
         * whole expected dtype was consumed. */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case '\r':
      case '\n':
        ++ts;
        break;
      case '<':
        if (!__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        if (__Pyx_Is_Little_Endian()) {
          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        ctx->new_packmode = *ts++;
        break;
      case 'T':
        {
          /* Nested struct "T{...}", possibly repeated by a count prefix:
           * recurse once per repetition. */
          const char* ts_after_sub;
          size_t i, struct_count = ctx->new_count;
          size_t struct_alignment = ctx->struct_alignment;
          ctx->new_count = 1;
          ++ts;
          if (*ts != '{') {
            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
            return NULL;
          }
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          ctx->enc_count = 0;
          ctx->struct_alignment = 0;
          ++ts;
          ts_after_sub = ts;
          for (i = 0; i != struct_count; ++i) {
            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
            if (!ts_after_sub) return NULL;
          }
          ts = ts_after_sub;
          if (struct_alignment) ctx->struct_alignment = struct_alignment;
        }
        break;
      case '}':
        {
          /* End of nested struct: flush and pad to the struct alignment. */
          size_t alignment = ctx->struct_alignment;
          ++ts;
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          if (alignment && ctx->fmt_offset % alignment) {
            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
          }
        }
        return ts;
      case 'x':
        /* Padding bytes: consume new_count bytes of offset. */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z':
        /* Complex prefix: must be followed by f/d/g; falls through to
         * the type-character handling below. */
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        }
        /* fall through */
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O': case 'p':
        /* Same type as the pending chunk: merge the counts... */
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          ctx->enc_count += ctx->new_count;
          ctx->new_count = 1;
          got_Z = 0;
          ++ts;
          break;
        }
        /* ...otherwise fall through and start a new chunk. */
      case 's':
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_count = ctx->new_count;
        ctx->enc_packmode = ctx->new_packmode;
        ctx->enc_type = *ts;
        ctx->is_complex = got_Z;
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':':
        /* Skip a ":name:" field annotation. */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      case '(':
        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
        break;
      default:
        {
          /* A repeat-count prefix for the next type character. */
          int number = __Pyx_BufFmt_ExpectNumber(&ts);
          if (number == -1) return NULL;
          ctx->new_count = (size_t)number;
        }
    }
  }
}
/* Reset a Py_buffer to an empty but safely-usable state, pointing the
 * shape/strides/suboffsets at the module's shared dummy arrays. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
  buf->buf = NULL;
  buf->obj = NULL;
  buf->strides = __Pyx_zeros;
  buf->shape = __Pyx_zeros;
  buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer from obj and validate it against the expected dtype,
 * dimensionality and item size.  None/NULL yields an empty (zeroed)
 * buffer and success.  Returns 0 on success, -1 with a Python error set
 * (and buf zeroed) on failure. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
  if (obj == Py_None || obj == NULL) {
    __Pyx_ZeroBuffer(buf);
    return 0;
  }
  buf->buf = NULL;
  if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
  if (buf->ndim != nd) {
    PyErr_Format(PyExc_ValueError,
                 "Buffer has wrong number of dimensions (expected %d, got %d)",
                 nd, buf->ndim);
    goto fail;
  }
  if (!cast) {
    /* Full format-string check unless the caller requested a cast. */
    __Pyx_BufFmt_Context ctx;
    __Pyx_BufFmt_Init(&ctx, stack, dtype);
    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
  }
  if ((unsigned)buf->itemsize != dtype->size) {
    PyErr_Format(PyExc_ValueError,
      "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
    goto fail;
  }
  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
  return 0;
fail:;
  __Pyx_ZeroBuffer(buf);
  return -1;
}
/* Release a buffer previously filled by __Pyx_GetBufferAndValidate;
 * no-op for empty buffers, and undoes the dummy-suboffsets substitution
 * before handing the buffer back to __Pyx_ReleaseBuffer. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
  if (info->buf == NULL) return;
  if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
  __Pyx_ReleaseBuffer(info);
}
/* GetModuleGlobalName */
/* Look up a name in the module dict (__pyx_d), falling back to builtins.
 * Returns a new reference, or NULL with an error set if the name is not
 * found anywhere.  NOTE: the braces only balance after preprocessing —
 * each #if branch opens the block the shared tail closes. */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
    PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
    result = PyDict_GetItem(__pyx_d, name);
    if (likely(result)) {
        Py_INCREF(result);
    } else {
#else
    result = PyObject_GetItem(__pyx_d, name);
    if (!result) {
        PyErr_Clear();
#endif
        result = __Pyx_GetBuiltinName(name);
    }
    return result;
}
/* PyObjectCall */
/* CPython-only fast path for calling an object: invokes tp_call directly
 * with recursion checking, mirroring PyObject_Call but skipping some of
 * its overhead.  Guards against callables that return NULL without
 * setting an exception. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* ExtTypeTest */
/* Check that obj is an instance of type; returns 1 on success, 0 with a
 * TypeError (or SystemError for a NULL type) set on failure. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(PyObject_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* PyErrFetchRestore */
/* Fast-thread-state equivalents of PyErr_Restore/PyErr_Fetch operating
 * directly on the given PyThreadState.  Restore steals the references it
 * is given and releases the previously-set exception only after the new
 * one is installed; Fetch transfers ownership to the caller. */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
/* Python 2 implementation of the 'raise' statement: normalises the
 * (type, value, tb) triple — instantiating classes, unwrapping instance
 * exceptions — and installs it as the current exception.  'cause' is
 * unused on Python 2. */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* 'raise instance': the type is taken from the instance itself. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
/* Python 3 implementation of 'raise type(value) from cause': normalises
 * class-vs-instance forms, instantiates the exception when needed,
 * attaches the __cause__, then sets the exception and (optionally) the
 * traceback.  Borrowed references throughout except owned_instance. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        /* If the given value is already an instance of a subclass of the
         * raised class, use its class; otherwise instantiate the class. */
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* Attach the explicit traceback to the freshly-set exception. */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* RaiseTooManyValuesToUnpack */
/* Set the ValueError used by unpacking when the iterable is too long. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Set the ValueError used by unpacking when the iterable is too short. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
/* Set the TypeError raised when None is iterated during unpacking. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* SaveResetException */
/* Fast-thread-state save/reset of the *handled* exception (exc_* slots,
 * i.e. sys.exc_info), as opposed to the *pending* curexc_* slots above.
 * Save returns new references; Reset steals its arguments and releases
 * the previous values only after the new ones are installed. */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
/* Fast-thread-state PyErr_ExceptionMatches: compares the pending
 * exception type against err, with an identity fast path. */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
    return PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
/* Fetch and normalise the pending exception into (*type, *value, *tb)
 * as new references, and make it the currently-handled exception
 * (sys.exc_info).  Returns 0 on success; on failure clears the outputs
 * and returns -1. */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* Extra increfs: one set of references goes to the caller, the other
     * set is installed as the handled-exception state below. */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* Import */
/* Import a module by name with the given from-list and relative-import
 * level, mirroring __import__.  On Python 3 a level of -1 first tries a
 * package-relative import (level 1) and falls back to absolute (level 0)
 * on ImportError.  Returns a new module reference or NULL. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
#if PY_VERSION_HEX < 0x03030000
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
#endif
    if (from_list)
        list = from_list;
    else {
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
#if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            /* Try a relative import first when we are inside a package. */
            if (strchr(__Pyx_MODULE_NAME, '.')) {
#if PY_VERSION_HEX < 0x03030000
                PyObject *py_level = PyInt_FromLong(1);
                if (!py_level)
                    goto bad;
                module = PyObject_CallFunctionObjArgs(py_import,
                    name, global_dict, empty_dict, list, py_level, NULL);
                Py_DECREF(py_level);
#else
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
#endif
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0;
        }
#endif
        if (!module) {
#if PY_VERSION_HEX < 0x03030000
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, NULL);
            Py_DECREF(py_level);
#else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
#endif
        }
    }
bad:
#if PY_VERSION_HEX < 0x03030000
    Py_XDECREF(py_import);
#endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* CLineInTraceback */
/* Decide whether to report the C source line in generated tracebacks:
 * returns c_line, or 0 to suppress it, based on the
 * cython_runtime.cline_in_traceback setting (cached into the module
 * dict on first failure). */
static int __Pyx_CLineForTraceback(int c_line) {
#ifdef CYTHON_CLINE_IN_TRACEBACK
    return ((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0;
#else
    PyObject *use_cline;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject **cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
      use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback);
    } else
#endif
    {
      /* Attribute lookup must not disturb the exception state we may be
       * in the middle of reporting, hence the fetch/restore pair. */
      PyObject *ptype, *pvalue, *ptraceback;
      PyObject *use_cline_obj;
      PyErr_Fetch(&ptype, &pvalue, &ptraceback);
      use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
      if (use_cline_obj) {
        /* NOTE(review): PyObject_Not returns -1 on error, which maps to
         * Py_False here — presumably acceptable as a best-effort default;
         * confirm against upstream intent. */
        use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
        Py_DECREF(use_cline_obj);
      } else {
        use_cline = NULL;
      }
      PyErr_Restore(ptype, pvalue, ptraceback);
    }
    if (!use_cline) {
      c_line = 0;
      PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
    }
    else if (PyObject_Not(use_cline) != 0) {
      c_line = 0;
    }
    return c_line;
#endif
}
/* CodeObjectCache */
/* Binary search for code_line in the sorted cache entries.  Returns the
 * index of the matching entry, or the insertion position if not found
 * (count if code_line is beyond the last entry).  NOTE: the final
 * comparison deliberately reuses 'mid' from the last loop iteration. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int start = 0, mid = 0, end = count - 1;
    if (end >= 0 && code_line > entries[end].code_line) {
        return count;
    }
    while (start < end) {
        mid = start + (end - start) / 2;
        if (code_line < entries[mid].code_line) {
            end = mid;
        } else if (code_line > entries[mid].code_line) {
             start = mid + 1;
        } else {
            return mid;
        }
    }
    if (code_line <= entries[mid].code_line) {
        return mid;
    } else {
        return mid + 1;
    }
}
/* Look up the cached PyCodeObject for a source line.  Returns a new
 * reference, or NULL if the line is not cached (or the cache is empty). */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the cached code object for a source line, keeping
 * the entry array sorted.  Allocates the array lazily (64 entries) and
 * grows it by 64; allocation failures silently skip caching.  Takes a
 * new reference to code_object on success. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* Same line already cached: swap in the new code object. */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* Shift the tail up by one to make room at the insertion point. */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal PyCodeObject carrying only the file name, function
 * name (annotated with the C file/line when c_line is nonzero) and the
 * Python line number, for synthesising traceback frames.  Returns a new
 * reference or NULL on failure. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
#else
    py_srcfile = PyUnicode_FromString(filename);
#endif
    if (!py_srcfile) goto bad;
    if (c_line) {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
    }
    else {
#if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
#else
        py_funcname = PyUnicode_FromString(funcname);
#endif
    }
    if (!py_funcname) goto bad;
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,   /*PyObject *filename,*/
        py_funcname,  /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a synthetic frame (funcname/filename/py_line, optionally the
 * C line) to the current exception's traceback.  Code objects are cached
 * keyed on -c_line or py_line; failures are silently ignored. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code,             /*PyCodeObject *code,*/
        __pyx_d,      /*PyObject *globals,*/
        0                    /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Python 2 buffer shims: route buffer acquisition/release through the
 * new-style buffer protocol when available, otherwise dispatch to the
 * numpy ndarray __getbuffer__/__releasebuffer__ implementations. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
        if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj) return;
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
    if ((0)) {}
        else if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
    view->obj = NULL;
    Py_DECREF(obj);
}
#endif
/* CIntFromPyVerify */
/* Range-check helper for the CIntFromPy conversions: verify that a value
 * obtained as func_type round-trips through target_type; otherwise jump
 * to the enclosing function's raise_(neg_)overflow labels.  The _EXC
 * variant additionally propagates a pending Python error signalled by a
 * func_value of -1. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* CIntToPy */
/* Convert a C int to a Python integer, choosing the smallest suitable
 * PyInt/PyLong constructor at compile time; falls back to a raw
 * byte-array conversion in native endianness. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Fallback: hand the raw bytes to _PyLong_FromByteArray. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* Declarations */
/* Build a float complex value from real/imaginary parts. Three variants:
 * C++ std::complex, C99 _Complex, or the plain struct fallback. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return ::std::complex< float >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return x + y*(__pyx_t_float_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      __pyx_t_float_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
/* Exact component-wise equality of two float complex values. */
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    if (a.real != b.real) return 0;
    return a.imag == b.imag;
}
/* Component-wise complex addition: returns a + b. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex result;
    result.real = a.real + b.real;
    result.imag = a.imag + b.imag;
    return result;
}
/* Component-wise complex subtraction: returns a - b. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex result;
    result.real = a.real - b.real;
    result.imag = a.imag - b.imag;
    return result;
}
/* Complex multiplication:
 * (ar + i*ai) * (br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    const float re = a.real * b.real - a.imag * b.imag;
    const float im = a.real * b.imag + a.imag * b.real;
    __pyx_t_float_complex result;
    result.real = re;
    result.imag = im;
    return result;
}
#if 1
/* Complex division a / b using scaled (Smith-style) division: divide by the
 * larger-magnitude component of b to reduce intermediate overflow/underflow. */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        if (b.imag == 0) {
            /* Real divisor: divide both components directly. */
            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else if (fabsf(b.real) >= fabsf(b.imag)) {
            /* NOTE(review): with b.imag != 0 on this path this branch looks
               unreachable; preserved exactly as generated. */
            if (b.real == 0 && b.imag == 0) {
                return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
            } else {
                float r = b.imag / b.real;
                float s = 1.0 / (b.real + b.imag * r);
                return __pyx_t_float_complex_from_parts(
                    (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
            }
        } else {
            float r = b.real / b.imag;
            float s = 1.0 / (b.imag + b.real * r);
            return __pyx_t_float_complex_from_parts(
                (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
        }
    }
#else
/* Textbook division variant (disabled): simpler but overflow-prone because
 * it forms |b|^2 directly. */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        if (b.imag == 0) {
            return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else {
            float denom = b.real * b.real + b.imag * b.imag;
            return __pyx_t_float_complex_from_parts(
                (a.real * b.real + a.imag * b.imag) / denom,
                (a.imag * b.real - a.real * b.imag) / denom);
        }
    }
#endif
/* Complex negation: returns -a. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
    __pyx_t_float_complex negated;
    negated.real = -a.real;
    negated.imag = -a.imag;
    return negated;
}
/* True (non-zero) iff both components compare equal to zero. */
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
    if (a.real != 0) return 0;
    return a.imag == 0;
}
/* Complex conjugate: flips the sign of the imaginary part. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
    __pyx_t_float_complex conjugate;
    conjugate.real = a.real;
    conjugate.imag = -a.imag;
    return conjugate;
}
#if 1
/* Magnitude |z|. Uses hypotf (overflow-safe) where available; otherwise the
 * direct sqrt of the squared components. */
    static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
      #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
        return sqrtf(z.real*z.real + z.imag*z.imag);
      #else
        return hypotf(z.real, z.imag);
      #endif
    }
/* Complex power a ** b.
 * Small integer exponents (0..4, and their negatives via reciprocal) are
 * handled by repeated multiplication; everything else goes through the
 * polar form: a**b = exp(b * log(a)).
 * Fix: the original `case 2` stored __Pyx_c_prod_float(a, a) into `z` and
 * then recomputed the same product for the return value — a dead store and
 * a duplicated multiplication; it now returns the product directly. */
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        float r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                /* Negative integer exponent: invert a, then use |b.real|. */
                float denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    return __Pyx_c_prod_float(a, a);
                case 3:
                    z = __Pyx_c_prod_float(a, a);
                    return __Pyx_c_prod_float(z, a);
                case 4:
                    z = __Pyx_c_prod_float(a, a);
                    return __Pyx_c_prod_float(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            } else if (b.imag == 0) {
                /* Purely real base and exponent: defer to powf. */
                z.real = powf(a.real, b.real);
                z.imag = 0;
                return z;
            } else if (a.real > 0) {
                r = a.real;
                theta = 0;
            } else {
                /* Negative real base: magnitude -a.real, angle pi. */
                r = -a.real;
                theta = atan2f(0, -1);
            }
        } else {
            r = __Pyx_c_abs_float(a);
            theta = atan2f(a.imag, a.real);
        }
        /* Polar evaluation: log, scale/rotate, then exp back. */
        lnr = logf(r);
        z_r = expf(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cosf(z_theta);
        z.imag = z_r * sinf(z_theta);
        return z;
    }
#endif
#endif
/* Declarations */
/* Build a double complex value from real/imaginary parts. Three variants:
 * C++ std::complex, C99 _Complex, or the plain struct fallback. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return ::std::complex< double >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return x + y*(__pyx_t_double_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      __pyx_t_double_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
/* Exact component-wise equality of two double complex values. */
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    if (a.real != b.real) return 0;
    return a.imag == b.imag;
}
/* Component-wise complex addition: returns a + b. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex result;
    result.real = a.real + b.real;
    result.imag = a.imag + b.imag;
    return result;
}
/* Component-wise complex subtraction: returns a - b. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex result;
    result.real = a.real - b.real;
    result.imag = a.imag - b.imag;
    return result;
}
/* Complex multiplication:
 * (ar + i*ai) * (br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    const double re = a.real * b.real - a.imag * b.imag;
    const double im = a.real * b.imag + a.imag * b.real;
    __pyx_t_double_complex result;
    result.real = re;
    result.imag = im;
    return result;
}
#if 1
/* Complex division a / b using scaled (Smith-style) division: divide by the
 * larger-magnitude component of b to reduce intermediate overflow/underflow. */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        if (b.imag == 0) {
            /* Real divisor: divide both components directly. */
            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else if (fabs(b.real) >= fabs(b.imag)) {
            /* NOTE(review): with b.imag != 0 on this path this branch looks
               unreachable; preserved exactly as generated. */
            if (b.real == 0 && b.imag == 0) {
                return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
            } else {
                double r = b.imag / b.real;
                double s = 1.0 / (b.real + b.imag * r);
                return __pyx_t_double_complex_from_parts(
                    (a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
            }
        } else {
            double r = b.real / b.imag;
            double s = 1.0 / (b.imag + b.real * r);
            return __pyx_t_double_complex_from_parts(
                (a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
        }
    }
#else
/* Textbook division variant (disabled): simpler but overflow-prone because
 * it forms |b|^2 directly. */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        if (b.imag == 0) {
            return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
        } else {
            double denom = b.real * b.real + b.imag * b.imag;
            return __pyx_t_double_complex_from_parts(
                (a.real * b.real + a.imag * b.imag) / denom,
                (a.imag * b.real - a.real * b.imag) / denom);
        }
    }
#endif
/* Complex negation: returns -a. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
    __pyx_t_double_complex negated;
    negated.real = -a.real;
    negated.imag = -a.imag;
    return negated;
}
/* True (non-zero) iff both components compare equal to zero. */
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
    if (a.real != 0) return 0;
    return a.imag == 0;
}
/* Complex conjugate: flips the sign of the imaginary part. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
    __pyx_t_double_complex conjugate;
    conjugate.real = a.real;
    conjugate.imag = -a.imag;
    return conjugate;
}
#if 1
/* Magnitude |z|. Uses hypot (overflow-safe) where available; otherwise the
 * direct sqrt of the squared components. */
    static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
      #if !defined(HAVE_HYPOT) || defined(_MSC_VER)
        return sqrt(z.real*z.real + z.imag*z.imag);
      #else
        return hypot(z.real, z.imag);
      #endif
    }
/* Complex power a ** b.
 * Small integer exponents (0..4, and their negatives via reciprocal) are
 * handled by repeated multiplication; everything else goes through the
 * polar form: a**b = exp(b * log(a)).
 * Fix: the original `case 2` stored __Pyx_c_prod_double(a, a) into `z` and
 * then recomputed the same product for the return value — a dead store and
 * a duplicated multiplication; it now returns the product directly. */
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        double r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                /* Negative integer exponent: invert a, then use |b.real|. */
                double denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    return __Pyx_c_prod_double(a, a);
                case 3:
                    z = __Pyx_c_prod_double(a, a);
                    return __Pyx_c_prod_double(z, a);
                case 4:
                    z = __Pyx_c_prod_double(a, a);
                    return __Pyx_c_prod_double(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            } else if (b.imag == 0) {
                /* Purely real base and exponent: defer to pow. */
                z.real = pow(a.real, b.real);
                z.imag = 0;
                return z;
            } else if (a.real > 0) {
                r = a.real;
                theta = 0;
            } else {
                /* Negative real base: magnitude -a.real, angle pi. */
                r = -a.real;
                theta = atan2(0, -1);
            }
        } else {
            r = __Pyx_c_abs_double(a);
            theta = atan2(a.imag, a.real);
        }
        /* Polar evaluation: log, scale/rotate, then exp back. */
        lnr = log(r);
        z_r = exp(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cos(z_theta);
        z.imag = z_r * sin(z_theta);
        return z;
    }
#endif
#endif
/* CIntToPy */
/* Convert an NPY_TYPES enum value to a new Python integer object.
 * Same compile-time-pruned template as __Pyx_PyInt_From_int; the
 * signedness of the enum's underlying type is probed via (enum)-1. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
    const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(enum NPY_TYPES) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Generic fallback: hand the raw bytes to CPython in native order. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert a Python object to a C int with overflow checking.
 * Fast paths: Py2 PyInt, then PyLong (reading the internal digit array
 * directly when CYTHON_USE_PYLONG_INTERNALS); non-integer objects are
 * coerced via __Pyx_PyNumber_IntOrLong and converted through
 * _PyLong_AsByteArray. Raises OverflowError for out-of-range values and
 * returns (int)-1 with a Python exception set on failure. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Small positive longs: assemble the value from 1-4 internal
               15/30-bit digits without calling the C API. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Non-CPython: detect negative via a rich comparison with 0. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Signed target: negative digit counts encode negative values. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Very wide longs: copy the value out byte-by-byte. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int/long: coerce first, then convert recursively. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
/* CIntFromPy */
/* Convert a Python object to a C unsigned int with overflow checking.
 * Mirrors __Pyx_PyInt_As_int for an unsigned target: negative inputs raise
 * OverflowError. Returns (unsigned int)-1 with a Python exception set on
 * failure (callers must check PyErr_Occurred to disambiguate). */
static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) {
    const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(unsigned int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (unsigned int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Small positive longs: assemble the value from 1-4 internal
               15/30-bit digits without calling the C API. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (unsigned int) 0;
                case  1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0])
                case 2:
                    if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) {
                            return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) {
                            return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) {
                            return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Non-CPython: detect negative via a rich comparison with 0. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (unsigned int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(unsigned int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Template branch for signed targets; unreachable for unsigned
               int since is_unsigned is 1, but emitted by the generator. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (unsigned int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(unsigned int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
                            return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
                            return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
                            return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
                            return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) {
                            return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) {
                            return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(unsigned int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Very wide longs: copy the value out byte-by-byte. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            unsigned int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (unsigned int) -1;
        }
    } else {
        /* Not an int/long: coerce first, then convert recursively. */
        unsigned int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (unsigned int) -1;
        val = __Pyx_PyInt_As_unsigned_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to unsigned int");
    return (unsigned int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to unsigned int");
    return (unsigned int) -1;
}
/* CIntToPy */
/* Convert a C long to a new Python integer object.
 * Same compile-time-pruned template as __Pyx_PyInt_From_int; comparisons
 * like sizeof(long) < sizeof(long) are always-false template residue that
 * the compiler discards. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
        }
    }
    {
        /* Generic fallback: hand the raw bytes to CPython in native order. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
/* Convert a Python object to a C long with overflow checking.
 * Same structure as __Pyx_PyInt_As_int for a long target: Py2 PyInt fast
 * path, PyLong internal-digit fast path, then generic byte-array fallback.
 * Raises OverflowError for out-of-range values and returns (long)-1 with a
 * Python exception set on failure. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Template branch for unsigned targets; unreachable for long
               since is_unsigned is 0, but emitted by the generator. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case  1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* Non-CPython: detect negative via a rich comparison with 0. */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (long) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
            }
        } else {
#if CYTHON_USE_PYLONG_INTERNALS
            /* Small longs: assemble the value from 1-4 internal 15/30-bit
               digits; negative digit counts encode negative values. */
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (long) 0;
                case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(long,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
            } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
            }
        }
        {
            /* Very wide longs: copy the value out byte-by-byte. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* Not an int/long: coerce first, then convert recursively. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}
/* CheckBinaryVersion */
/* Warn (once, via PyErr_WarnEx) when the Python version this module was
   compiled against differs from the interpreter importing it.  Only the
   major.minor digits ("X.Y") are compared.  Returns 0 on a match or the
   PyErr_WarnEx() result on a mismatch. */
static int __Pyx_check_binary_version(void) {
    char compiled[4];
    char running[4];
    char warning_text[200];
    PyOS_snprintf(compiled, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(running, 4, "%s", Py_GetVersion());
    if ((compiled[0] == running[0]) && (compiled[2] == running[2]))
        return 0;
    PyOS_snprintf(warning_text, sizeof(warning_text),
                  "compiletime version %s of module '%.100s' "
                  "does not match runtime version %s",
                  compiled, __Pyx_MODULE_NAME, running);
    return PyErr_WarnEx(NULL, warning_text, 1);
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import and return the module named `name` (new reference), or NULL with
   an exception set on failure. */
static PyObject *__Pyx_ImportModule(const char *name) {
    PyObject *module;
    PyObject *identifier = __Pyx_PyIdentifier_FromString(name);
    if (!identifier)
        return 0;
    module = PyImport_Import(identifier);
    Py_DECREF(identifier);
    return module;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import module_name.class_name and verify it is a type object whose
   tp_basicsize is binary compatible with the instance size this module was
   compiled against.  With strict != 0 any size mismatch is an error; with
   strict == 0 a larger runtime type only triggers a compatibility warning
   (layout prefix is still usable).  Returns a new reference to the type, or
   NULL with an exception set. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
    size_t size, int strict)
{
    PyObject *py_module = 0;
    PyObject *result = 0;
    PyObject *py_name = 0;
    char warning[200];
    Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
    PyObject *py_basicsize;
#endif
    py_module = __Pyx_ImportModule(module_name);
    if (!py_module)
        goto bad;
    py_name = __Pyx_PyIdentifier_FromString(class_name);
    if (!py_name)
        goto bad;
    result = PyObject_GetAttr(py_module, py_name);
    Py_DECREF(py_name);
    py_name = 0;
    Py_DECREF(py_module);
    py_module = 0;
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
    /* Under the limited API tp_basicsize cannot be read from the struct;
       fetch it through the __basicsize__ attribute instead. */
#ifndef Py_LIMITED_API
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    if (!strict && (size_t)basicsize > size) {
        /* Runtime type grew: compatible prefix, warn but do not fail. */
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd",
            module_name, class_name, basicsize, size);
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
    }
    else if ((size_t)basicsize != size) {
        /* Exact-size check (always applied when strict, or when the runtime
           type shrank, which is never layout compatible). */
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd",
            module_name, class_name, basicsize, size);
        goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(py_module);
    Py_XDECREF(result);
    return NULL;
}
#endif
/* InitStrings */
/* Materialize every entry of the module's static string table.  Each entry
   describes one cached string constant: its bytes `s`, size `n` (including
   the trailing NUL, hence the `n - 1` below), an optional encoding, and
   flags selecting unicode/str/bytes representation and interning.  The
   created object is stored through the entry's `p` slot.  Returns 0 on
   success, -1 on allocation failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
    #if PY_MAJOR_VERSION < 3
        /* Python 2: choose between unicode and (possibly interned) str. */
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
    #else
        /* Python 3: str constants become unicode (decoded with the entry's
           encoding when given), everything else becomes bytes. */
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
    #endif
        if (!*t->p)
            return -1;
        /* Pre-compute (and cache) the hash; a failure here is not fatal. */
        if (PyObject_Hash(*t->p) == -1)
            PyErr_Clear();
        ++t;
    }
    return 0;
}
/* Build a Python unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const Py_ssize_t byte_count = (Py_ssize_t)strlen(c_str);
    return __Pyx_PyUnicode_FromStringAndSize(c_str, byte_count);
}
/* Convenience wrapper around __Pyx_PyObject_AsStringAndSize for callers
   that do not care about the string length. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t discarded_length;
    return __Pyx_PyObject_AsStringAndSize(o, &discarded_length);
}
/* Return a borrowed char* view of o's contents and store its length in
   *length.  Handles unicode (when the module's default-encoding assumptions
   allow a zero-copy view), bytearray, and bytes; anything else fails via
   PyBytes_AsStringAndSize.  Returns NULL with an exception set on failure.
   The pointer borrows from o (or from o's cached default-encoded form) and
   is only valid while o is alive. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        /* Pre-3.3 unicode: go through the interpreter-cached default
           encoding of the string. */
        char* defenc_c;
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* The module was built assuming ASCII; reject any byte >= 128
               by triggering the UnicodeEncodeError ASCII encoding raises. */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else
        /* 3.3+ flexible string representation. */
        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            /* ASCII strings expose their UTF-8 form without copying. */
            *length = PyUnicode_GET_LENGTH(o);
            return PyUnicode_AsUTF8(o);
        } else {
            /* Raise the ASCII UnicodeEncodeError for consistency. */
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else
        return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
    } else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        /* Fallback: bytes (or raise TypeError for unsupported objects). */
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Fast truth test: resolve the singletons True/False/None without a call,
   delegate everything else to PyObject_IsTrue(). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True)
        return 1;
    if ((x == Py_False) || (x == Py_None))
        return 0;
    return PyObject_IsTrue(x);
}
/* Coerce x to a Python integer (int/long), mirroring int(x) semantics.
   Integers are returned as a new reference to x itself; other objects are
   converted through their __int__/__long__ number slots, and the slot's
   result is type-checked so a misbehaving __int__ raises TypeError instead
   of propagating a non-integer.  Returns a new reference or NULL with an
   exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
    PyNumberMethods *m;
#endif
    const char *name = NULL;  /* slot name used in the error message */
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        /* Already an integer: no conversion, just a new reference. */
        return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    /* Python 2: prefer __int__, fall back to __long__. */
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
#else
    res = PyNumber_Int(x);
#endif
    if (res) {
        /* Guard against __int__/__long__ returning a non-integer. */
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        /* No number slot at all: raise a uniform TypeError. */
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
/* Convert b to a Py_ssize_t, mirroring operator.index() semantics.  Exact
   ints/longs take a fast path (including direct digit assembly when the
   CPython long internals are available); other objects go through
   PyNumber_Index().  Returns -1 with an exception set on failure (callers
   must check PyErr_Occurred() to disambiguate a legitimate -1). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
  Py_ssize_t ival;
  PyObject *x;
#if PY_MAJOR_VERSION < 3
  if (likely(PyInt_CheckExact(b))) {
    if (sizeof(Py_ssize_t) >= sizeof(long))
        return PyInt_AS_LONG(b);
    else
        /* BUG FIX: previously called PyInt_AsSsize_t(x) on the still
           uninitialized local `x` (undefined behavior); the operand being
           converted is `b`. */
        return PyInt_AsSsize_t(b);
  }
#endif
  if (likely(PyLong_CheckExact(b))) {
    #if CYTHON_USE_PYLONG_INTERNALS
    /* Assemble the value directly from the PyLong digit array for small
       magnitudes; |ob_size| is the digit count, its sign the value's sign. */
    const digit* digits = ((PyLongObject*)b)->ob_digit;
    const Py_ssize_t size = Py_SIZE(b);
    if (likely(__Pyx_sst_abs(size) <= 1)) {
      ival = likely(size) ? digits[0] : 0;
      if (size == -1) ival = -ival;
      return ival;
    } else {
      switch (size) {
         case 2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
      }
    }
    #endif
    /* Too many digits for the fast path: let CPython handle overflow. */
    return PyLong_AsSsize_t(b);
  }
  /* Generic path: __index__() then convert the resulting integer. */
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyInt_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
/* Convert a C size_t to a Python integer object (new reference, or NULL
   with an exception set). */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    PyObject *number = PyInt_FromSize_t(ival);
    return number;
}
#endif /* Py_PYTHON_H */
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
/*
  Quotient/remainder pair produced by the cache's modulo arithmetic.  Both
  fields are signed (ssize_t) so negative pixel coordinates can be handled.
  NOTE(review): presumably used for virtual-pixel coordinate wrapping --
  confirm against the VirtualPixel helpers later in this file.
*/
typedef struct _MagickModulo
{
  ssize_t
    quotient,
    remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /*
    Allocate and zero the cache structure; allocation failure is fatal.
  */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no backing file yet */
  cache_info->id=GetMagickThreadId();
  /*
    Size the nexus pool: at least the caller's request, raised to the OpenMP
    maximum and the thread resource limit, and never less than one.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Read the synchronize setting from the environment; the security policy,
    checked second, overrides it.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads nexus structures in one contiguous arena: the
    first number_threads entries are the per-thread nexuses, the second half
    are handed out as their virtual nexuses (see the loop below).
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    /* Pair nexus i with virtual nexus number_threads+i from the arena. */
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a pointer to the in-core pixel store of image and set *length to
  its size in bytes.  Yields NULL (with *length left at 0) when the cache is
  not held in heap or memory-mapped storage.
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
/*
  Instantiate the cache component: lazily create the global cache semaphore.
  Always succeeds.
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    The semaphore may not exist yet (terminus without genesis); activate it
    first so RelinquishSemaphoreInfo always receives a valid semaphore.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Composite the nexus pixels (q) over the authentic cache pixels (p),
  weighted by the image's write mask, so that masked regions keep their
  original values.  Returns MagickTrue when every pixel of the region was
  processed (or no write mask is active), MagickFalse otherwise.
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);  /* no write mask: nothing to clip */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p walks the authentic (pre-update) pixels, q the nexus pixels. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /* Blend each updatable channel with the mask-scaled alpha. */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* n falls short of number_pixels only when p was NULL (read failure). */
  return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  Create a new, empty pixel cache that inherits the thread count and virtual
  pixel method of cache (no pixel data is copied).
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
/*
  Copy the method table of cache into clone, leaving all other clone state
  untouched.
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict origin;

  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  origin=(CacheInfo *) cache;
  assert(origin->signature == MagickCoreSignature);
  destination->methods=origin->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Byte-copy one disk-backed pixel cache file to another (both caches must
  have identical morphology).  Returns MagickTrue only when exactly
  cache_info->length bytes were transferred.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /*
    Copy in chunks of up to MagickMaxBufferExtent, shrunk to the source file
    size when that is smaller.
  */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: abort, the length check below reports failure */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Copy all pixels (and metacontent, when both sides carry it) from
  cache_info into clone_info.  Identical morphology is handled with a single
  memcpy or a disk-to-disk byte copy; otherwise the copy proceeds row by
  row, remapping channels when the channel maps differ.  Returns MagickTrue
  on success.
*/
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
/* Pick the OpenMP thread count: single-threaded when requested, capped at 2
   when either side is disk-backed, otherwise scaled to the row count. */
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* ping caches have no pixels to copy */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* Both in core: one memcpy for pixels, one for metacontent. */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  /* Matching channel maps permit a straight per-row memcpy below. */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* Read one source row into this thread's nexus. */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* Stage the destination row. */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            /* Copy each destination channel from its source offset when
               the source defines that channel. */
            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Release the pixel cache attached to the image, if one exists.  The cache
  member is reset by DestroyPixelCache()'s return value.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to a registered destroy handler when one is installed;
    otherwise tear the cache down directly.
  */
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  Close the disk file backing the pixel cache and return the file resource
  to the resource accountant.  Returns MagickFalse when no descriptor is
  open or close(2) fails.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  if (cache_info->file == -1)
    return(MagickFalse);
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
/*
  Release the pixel store backing the cache (heap, memory map, disk file, or
  distributed server) and return the associated resource accounting.  On
  return the cache is marked UndefinedCache with no metacontent.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* Pixels owned by an OpenCL cache info: release through OpenCL. */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* Memory cache may be aligned heap memory or an anonymous mapping. */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* No break above: a map cache is backed by a disk file, so execution
       falls through to DiskCache to close the descriptor as well.
       NOTE(review): fall-through appears deliberate (matches upstream
       ImageMagick) -- confirm before ever adding a break here. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
/*
  Drop one reference to the pixel cache; the last reference releases the
  pixel store, nexus array, and all auxiliary state.  Returns NULL (callers
  assign the result back to their cache handle).
*/
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Decrement under the cache semaphore; bail out early while other
    references remain.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Teardown order: pixel store first, then ancillary structures, then the
    semaphores themselves, and finally the cache info record.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature so stale pointers trip the asserts above. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Free a nexus staging buffer (aligned heap memory or an anonymous map) and
  reset all of its bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->mapped=MagickFalse;
  nexus_info->length=0;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->cache=(Quantum *) NULL;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /*
    The array holds 2*number_threads entries; release each nexus' staging
    buffer and poison its signature.
  */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    NexusInfo
      *nexus = nexus_info[i];

    if (nexus->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus);
    nexus->signature=(~MagickCoreSignature);
  }
  /*
    The NexusInfo records were allocated as one contiguous block anchored at
    the first slot; free that block, then the pointer array itself.
  */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Without an installed handler, return this thread's nexus metacontent.
  */
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    Resolve a private, opened cache before exposing its pixels to OpenCL.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only heap-backed (non-mapped) memory caches can be wrapped. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /*
    An existing OpenCL cache info bound to a different context cannot be
    reused; copy it so a fresh one is acquired below.
    NOTE(review): CopyMagickCLCacheInfo semantics not visible here --
    presumably it detaches/replaces the context-mismatched info; confirm.
  */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* Retain the mem object while still under the semaphore. */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  /* The OpenCL buffer must alias the cache's own pixel store. */
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Queue the region, then transfer pixels (and metacontent, when present)
    from the backing store unless the nexus aliases the cache in place.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Without an installed handler, return this thread's nexus pixels.
  */
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    A registered handler takes precedence; otherwise serve the request
    through this thread's cache nexus.
  */
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Transfer the requested region into the calling thread's cache nexus and
  return a pointer to its pixels, or NULL on failure.
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Extent is measured on the calling thread's nexus. */
  nexus_info=cache_info->nexus_info[id];
  return(GetPixelCacheNexusExtent(cache_info,nexus_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Verify the image and its pixel cache agree on storage class, colorspace,
  geometry, channel layout, and metacontent extent; MagickFalse signals the
  cache must be reopened.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  cache_info=(CacheInfo *) image->cache;
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels))
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (image->metacontent_extent != cache_info->metacontent_extent))
    return(MagickFalse);
  /* Channel maps must match element-for-element. */
  p=image->channel_map;
  q=cache_info->channel_map;
  if (memcmp(p,q,image->number_channels*sizeof(*p)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* Process-wide throttle/time-limit state, initialized on first use. */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /* Optionally yield the CPU every 32 calls when a throttle is configured. */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  /* Enforce the time resource limit: close any disk cache and abort. */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: a shared or read-only cache must be replaced by a private
    clone before modification.  The condition is re-checked under the cache
    semaphore (double-checked locking on reference_count/mode).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          /* Stack-local shallow copy of the image, used only to open the
             cloned cache with its own semaphore. */
          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Optionally copy the pixel data, not just the structure. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Success: swap the clone in; drop the original later. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Release our reference to the original cache outside its semaphore. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report the backing-store type (memory, map, disk, distributed, ...) of the
  image's pixel cache.
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Scatter one raw pixel into destination[] indexed by pixel channel.  When
  source is NULL, fill in the image background color instead and report
  MagickFalse.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    destination[GetPixelChannelChannel(image,i)]=source[i];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Zero the output so unset channels are deterministic. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler ==
      (GetOneAuthenticPixelFromHandler) NULL)
    {
      q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
      return(CopyPixel(image,q,pixel));
    }
  return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
    pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Fetch a single authentic pixel at (x,y) through the calling thread's
  cache nexus; the background color is substituted on failure.
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict authentic_pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  authentic_pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,authentic_pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Zero the output so unset channels are deterministic. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler ==
      (GetOneVirtualPixelFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
        1UL,1UL,cache_info->nexus_info[id],exception);
      return(CopyPixel(image,p,pixel));
    }
  return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
    GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  const Quantum
    *q;
  /*
    Default handler: zero the caller's pixel buffer, read one virtual pixel
    through this thread's cache nexus, and copy it out.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  const Quantum
    *magick_restrict q;
  /*
    Initialize the result to the image's PixelInfo defaults, then read a
    single virtual pixel through this thread's cache nexus and convert it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  q=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (q == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,q,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;
  /*
    Report the colorspace in which the pixel cache stores its pixels;
    trace the cache filename when debugging is enabled.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Reset the method table, then install the default pixel-cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) accessors.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (writable) accessors.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /*
    Lifecycle.
  */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    extent;
  /*
    Return the pixel extent (width*height) of the given cache nexus; when
    the nexus region is empty, fall back to the full cache extent
    (columns*rows).
  */
  assert(cache != (Cache) NULL);  /* typed NULL check, consistent with siblings */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  /*
    Expose the raw pixel buffer when the cache resides in heap or mapped
    memory; for any other cache type return NULL.  *length always receives
    the cache length in bytes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;
  /*
    Report the storage class (DirectClass or PseudoClass) of the pixel
    cache; trace the cache filename when debugging is enabled.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;
  size_t
    extent;
  /*
    Choose a square tile size: 2048 bytes of pixels per row for in-memory
    caches, 8192 for disk-backed caches, divided by the bytes per pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=2048UL;
  if (GetImagePixelCacheType(image) == DiskCache)
    extent=8192UL;
  *width=extent/(cache_info->number_channels*sizeof(Quantum));
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;
  /*
    Report the virtual-pixel policy used for accesses outside the image
    cache boundaries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Default handler: hand back the metacontent held by this thread's cache
    nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;
  /*
    Return the metacontent of the given cache nexus, or NULL when the cache
    has no defined storage class.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  const void
    *magick_restrict metacontent;
  /*
    Prefer the installed metacontent handler; when it yields nothing, fall
    back to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table; entries are in [0,63] and are recentered
  by subtracting 32 before use.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/*
  Jitter x by the ordered-dither offset for its column, clamped to
  [0,columns-1].
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) columns)
      offset=(ssize_t) columns-1L;
  return(offset);
}

/*
  Jitter y by the ordered-dither offset for its row, clamped to [0,rows-1].
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) rows)
      offset=(ssize_t) rows-1L;
  return(offset);
}
/*
  Clamp x into [0,columns-1] (replicate-edge policy).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
/*
  Clamp y into [0,rows-1] (replicate-edge policy).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
/*
  Pick a pseudo-random column index in [0,columns).
*/
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (GetPseudoRandomValue(random_info)*columns));
}
/*
  Pick a pseudo-random row index in [0,rows).
*/
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (GetPseudoRandomValue(random_info)*rows));
}
/*
  Floored division of offset by extent: the remainder is always in
  [0,extent), even for negative offsets, so it can index into the cache.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;
  ssize_t
    signed_extent;

  signed_extent=(ssize_t) extent;
  modulo.quotient=offset/signed_extent;
  modulo.remainder=offset % signed_extent;
  /*
    C truncates toward zero; when signs differ and there is a nonzero
    remainder, shift to the floored result.
  */
  if ((modulo.remainder != 0) && ((offset ^ signed_extent) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=signed_extent;
    }
  return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read the columns-by-rows region anchored at (x,y) through the given
    cache nexus.  A region entirely inside the cache extents is read
    directly; any part outside is synthesized per virtual_pixel_method
    (edge-replicate, tile, mirror, constant color, etc.), recursing for the
    in-bounds source pixels.  Returns the nexus pixels, or NULL on failure
    (an exception is then raised).
  */
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    length,
    number_pixels;
  NexusInfo
    *magick_restrict virtual_nexus;
  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];
  register const Quantum
    *magick_restrict p;
  register const void
    *magick_restrict r;
  register Quantum
    *magick_restrict q;
  register ssize_t
    i,
    u;
  register unsigned char
    *magick_restrict s;
  ssize_t
    v;
  void
    *magick_restrict virtual_metacontent;
  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    A write/composite mask forces a private nexus buffer (MagickTrue below).
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  /*
    offset/length describe the nexus region as a linear span over the cache;
    used with the (x,y) bounds check to detect a fully in-cache request.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;
        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For constant-color policies, precompute the replacement pixel (and a
    zeroed metacontent buffer when the cache carries metacontent).
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            Remaining policies (e.g. background/edge/tile) use the image
            background color as the constant pixel.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Assemble the region row by row; within a row, in-cache spans are copied
    as runs and out-of-cache pixels are synthesized one at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;
    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;
      x_offset=x+u;
      /*
        length = size of the longest in-cache run starting at x_offset.
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;
          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              /*
                Recurse with the coordinate clamped into the cache extents;
                nexus_info->virtual_nexus is the same nexus as virtual_nexus.
              */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* random_info is created lazily on first use */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /* floored modulo keeps the remainder non-negative */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* odd tile replicas are reflected */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* constant pixel precomputed above */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): only `length` bytes are copied here while s
            advances by length*metacontent_extent; the single-pixel branch
            above copies metacontent_extent bytes per pixel.  Verify this is
            intended when metacontent_extent > 1.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    /* inner loop broke early on a read failure */
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Default handler: read a virtual region through the calling thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the
%  last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Return the virtual pixels most recently acquired on this thread;
    delegate to the installed handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  VirtualPixelMethod
    method;
  /*
    Return an immutable pixel region, delegating to the installed handler
    when one is registered; otherwise read through this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=GetPixelCacheVirtualMethod(image);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,method,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t V i r t u a l P i x e l s C a c h e                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call to
%  QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Return the pixels most recently materialized in this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;
  /*
    Expose the pixels held by the given cache nexus; an uninitialized cache
    (UndefinedClass) has no pixels to expose.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((const Quantum *) NULL);
  return((const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;
  /*
    Composite pixel p over q weighted by the two mask alphas; a fully opaque
    mask (alpha == OpaqueAlpha) leaves the source pixel untouched.
  */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  gamma=PerceptibleReciprocal(gamma);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickSizeType
    number_pixels;
  register Quantum
    *magick_restrict p,
    *magick_restrict q;
  register ssize_t
    n;
  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to mask when the image has no composite-mask channel */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p walks the authentic (underlying) pixels of the nexus region; q walks
     the nexus' own pixel buffer that is being masked in place */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;
    register ssize_t
      i;
    /* a NULL p means the authentic pixels could not be fetched; abort */
    if (p == (Quantum *) NULL)
      break;
    mask_alpha=(double) GetPixelCompositeMask(image,p);
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      /* only channels flagged for update participate in masking */
      if ((traits & UpdatePixelTrait) == 0)
        continue;
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
        GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* an early break leaves n short of number_pixels: report failure */
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;
  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  /* no filename yet: create a unique temporary file for the cache */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* prefer exclusive create; fall back to an existing file */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* read/write: same create-then-open fallback as WriteMode */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  /* account for the new descriptor, then retire any previously open one */
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    total;
  ssize_t
    bytes;
  /*
    Write `length` bytes from buffer at the given file offset, retrying on
    short writes and EINTR; returns the number of bytes actually written.
  */
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* without pwrite(), position the shared file offset explicitly */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  bytes=0;
  for (total=0; total < (MagickOffsetType) length; total+=bytes)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    bytes=write(cache_info->file,buffer+total,(size_t) MagickMin(length-total,
      (size_t) SSIZE_MAX));
#else
    bytes=pwrite(cache_info->file,buffer+total,(size_t) MagickMin(length-total,
      (size_t) SSIZE_MAX),offset+total);
#endif
    if (bytes <= 0)
      {
        bytes=0;
        /* only an interrupted call is retried; any other error aborts */
        if (errno != EINTR)
          break;
      }
  }
  return(total);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    count,
    extent,
    offset;
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];
      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that cannot be represented as a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      /*
        Grow the file by writing one byte at the last position; this extends
        the file without materializing the intervening blocks.
      */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* when synchronize is requested, reserve the blocks up front */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* rewind so subsequent I/O starts at the beginning of the file */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;
  char
    format[MagickPathExtent],
    message[MagickPathExtent];
  const char
    *hosts,
    *type;
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  size_t
    columns,
    packet_size;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* cache_anonymous_memory < 0 means the policy has not been queried yet;
     resolve it once and remember the answer */
  if (cache_anonymous_memory < 0)
    {
      char
        *value;
      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /* snapshot the current cache so its pixels can be cloned or released
     after the new cache is in place */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* recompute columns from length as an overflow check: a mismatch means
     the multiplication above wrapped */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: record attributes only, never allocate pixels */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  /*
    First choice: a heap- or anonymously-mapped memory cache, permitted only
    when the area resource allows it and the length fits in a size_t.
  */
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: restore the previous cache pixels */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  /*
    Second choice: disk cache; if the disk resource is exhausted, try a
    distributed (remote) pixel cache before giving up.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;
      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* neither disk nor distributed cache is available */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* start from a fresh cache file unless persisting */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  /*
    Prefer a memory-mapped view of the disk file when the map resource allows
    it; otherwise fall back to a plain (seek/read/write) disk cache.
  */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /* mapping failed: fall back to plain disk cache */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  /* plain disk cache path */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to (rather than initializes)
%      an existing persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;
  MagickBooleanType
    status;
  ssize_t
    page_size;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance the caller's offset past this cache, rounded up to the
         next page boundary */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* build a disk-backed clone descriptor that mirrors the current cache */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  /* copy the pixels into the persistent file, then release the clone */
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    number_pixels;
  Quantum
    *magick_restrict pixels;
  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* the region's upper-left corner must lie inside the cache extent */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* the region's last pixel must also fall inside the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Queue a mutable pixel region through this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum pixel array (and/or its metacontent) has been updated,
%  the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Queue a mutable pixel region for the requested rectangle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to a user-registered handler when one is installed.
  */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Read `length` bytes at `offset` from the disk-backed pixel cache into
    `buffer`.  Returns the number of bytes actually read (callers compare it
    against `length`), or -1 if the initial seek fails.  Short reads are
    retried; EINTR is transparent.
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* each request is clamped to SSIZE_MAX, the portable read() maximum */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;  /* do not advance i on a failed read */
        if (errno != EINTR)
          break;  /* EOF or hard error: return bytes read so far */
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  /*
    Read metacontent for the nexus region from the cache backing store
    (memory, map, disk, or distributed server) into the nexus buffer.
    Returns MagickFalse when the cache has no metacontent or a transfer
    falls short; MagickTrue when the nexus aliases the cache directly or
    the copy succeeds.

    Fix: the DistributedCache branch passed `®ion` (mojibake of `&region`,
    from `&reg` being decoded as an HTML entity); restored to `&region`.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.  When the nexus spans full cache rows
        and the extent fits in a size_t, collapse the copy to one memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk; metacontent is stored after all pixel
        data, hence the extent*number_channels*sizeof(Quantum) base offset.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache, one region row at a time
        unless the whole extent fits in a single request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* a transfer above broke early: report the failure */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Read pixels for the nexus region from the cache backing store (memory,
    map, disk, or distributed server) into the nexus buffer.  The early
    division checks reject offset/length computations that overflowed.

    Fix: the DistributedCache branch passed `®ion` (mojibake of `&region`,
    from `&reg` being decoded as an HTML entity); restored to `&region`.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);  /* offset computation overflowed */
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);  /* row length computation overflowed */
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);  /* empty region or extent overflowed */
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.  When the nexus spans full cache rows and
        the extent fits in a size_t, collapse the copy to one memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one region row per request unless the whole
        extent fits in a single buffered read.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, advancing region.y per row.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* a transfer above broke early: report the failure */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the cache reference count under its semaphore and return the
    cache.  Note: the NULL check casts to (Cache), not (Cache *) — Cache is
    already an opaque handle type, matching the asserts elsewhere in this
    file (e.g. image->cache != (Cache) NULL).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Synchronize the cache's channel count with the image's current pixel
    channels (called after channel layout changes).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Clear the module-global anonymous-memory policy flag; subsequent nexus
    allocations use aligned heap memory rather than anonymous maps.
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the module-global pixel cache epoch counter.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler in cache_methods overrides the
    cache's current handler only when it is non-NULL, so callers may supply
    a partially-populated CacheMethods structure.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): unlike the other handlers, this guard reads the CURRENT
    handler from cache_info->methods (not the incoming cache_methods) —
    i.e. the override applies only when a handler is already installed.
    This asymmetry is preserved as-is; confirm it is intentional upstream.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%      const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%      ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate `length` bytes of staging storage for the cache nexus, either
    as zeroed aligned heap memory or (when cache_anonymous_memory > 0) as an
    anonymous memory map.  On failure, raises PixelCacheAllocationFailed and
    returns MagickFalse; nexus_info->length is updated only on success.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      /* request does not fit in size_t on this platform */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;  /* release with UnmapBlob, not free */
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Hint the processor to prefetch the second cache line of the nexus
    pixels: read-intent for ReadMode, write-intent for every other mode.
    Skipped when the nexus is smaller than one cache line.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Define the region of the cache covered by this nexus and return a
    pointer to its pixels.  When the cache lives in memory/map and the
    requested region is row-contiguous within bounds, the nexus aliases the
    cache directly; otherwise pixels are staged in a private nexus buffer
    and synced back later.  Returns NULL on error.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  assert(nexus_info->signature == MagickCoreSignature);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is possible only when the region is contiguous in the
        cache: either full-width rows, or a single row fully inside bounds.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
           (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /* allocate at least one full cache row/column worth of pixels */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer too small: reallocate */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Enable the image's alpha trait and set every pixel's alpha component to
    `alpha`, row by row (in parallel when OpenMP is available).  Returns
    MagickFalse if any row fails to sync.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed: skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Install the new virtual pixel method and return the previous one.  For
    background/transparent methods, ensure the image has the traits those
    methods rely on (alpha channel, non-gray colorspace as needed).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* give the image an alpha channel if the background color has one */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* promote gray images whose background is not gray */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* transparent virtual pixels require an alpha channel */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Synchronize the OpenCL device buffer with host memory; no-op unless the
    cache is a memory cache with an attached OpenCL buffer.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Wait for outstanding OpenCL operations on the image's cache and update
    host memory (thin wrapper over CopyOpenCLBuffer).
  */
  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.  Applies write/composite masks first
    (unless the image is itself a mask being updated), then writes staged
    pixels and metacontent back to the backing store.  A nexus that aliases
    the cache directly needs no write-back.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /* clip/composite the staged pixels against the image's masks */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* nexus writes went straight to the cache: just mark it modified */
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync this thread's authentic pixel nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Flush authentic pixels to the in-memory or disk cache: delegate to a
    registered sync handler when installed, otherwise sync this thread's
    cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Sync the image pixel cache; succeeds when a usable cache is returned.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheMetacontent() writes the meta-content of the given cache
  nexus to the pixel cache backing store (memory/map, disk, or distributed
  cache server).  Returns MagickTrue on success, MagickFalse otherwise.

  Fix: the DistributedCache branch passed the garbled token `®ion`
  (a mis-encoding of `&region`) to WriteDistributePixelCacheMetacontent;
  it now passes the address of the local `region`.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Offset (in pixels) of the nexus origin within the full cache, and the
    byte length of one nexus row of meta-content.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  When the nexus spans full cache
        rows and the extent fits in a size_t, collapse the copy to a single
        memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        Meta-content is stored on disk after all pixel channels; `extent` is
        re-purposed as the pixel-area skip.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one region row at a time
        unless the whole nexus fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y only reaches rows when every row transfer succeeded.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   W r i t e   P i x e l   C a c h e   P i x e l s                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() writes image pixels from the given cache nexus to
  the pixel cache backing store (memory/map, disk, or distributed cache
  server).  Returns MagickTrue on success, MagickFalse otherwise.

  Fix: the DistributedCache branch passed the garbled token `®ion`
  (a mis-encoding of `&region`) to WriteDistributePixelCachePixels;
  it now passes the address of the local `region`.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Offset (in pixels) of the nexus origin within the full cache, and the
    byte length of one nexus row across all channels.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  When the nexus spans full cache rows and
        the extent fits in a size_t, collapse the copy to a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one region row at a time unless
        the whole nexus fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y only reaches rows when every row transfer succeeded.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
 * \brief This class is used to store some meta (non-feature) data for training data,
 * e.g. labels, weights, initial scores, query level information.
 *
 * Some details:
 * 1. Label, used for training.
 * 2. Weights, weights of records, optional.
 * 3. Query Boundaries, necessary for lambdarank.
 *    The documents of the i-th query are in [query_boundaries[i], query_boundaries[i+1]).
 * 4. Query Weights, auto-calculated from weights and query_boundaries (if both exist);
 *    the weight for the i-th query is
 *    sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1]-1])
 *    / (query_boundaries[i+1] - query_boundaries[i]).
 * 5. Initial score, optional. If it exists, the model will boost from this score,
 *    otherwise it will start from 0.
 */
class Metadata {
 public:
  /*!
   * \brief Null constructor
   */
  Metadata();
  /*!
   * \brief Initialization will load query level information, since it is needed for sampling data
   * \param data_filename Filename of data
   * \param initscore_file Filename of initial score
   */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
   * \brief Initialize as a subset of another Metadata object
   * \param metadata Source metadata
   * \param used_indices Indices (into the source) of the records to keep
   * \param num_used_indices Number of used indices
   */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
   * \brief Initialize with binary memory
   * \param memory Pointer to memory
   */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
   * \brief Initial work, will allocate space for label, weight (if it exists) and query (if it exists)
   * \param num_data Number of training data
   * \param weight_idx Index of weight column, < 0 means it doesn't exist
   * \param query_idx Index of query id column, < 0 means it doesn't exist
   */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
   * \brief Partition label by used indices
   * \param used_indices Indices of locally used records
   */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
   * \brief Partition meta data according to local used indices if needed
   * \param num_all_data Number of total training data, including other machines' data on parallel learning
   * \param used_data_indices Indices of local used training data
   */
  void CheckOrPartition(data_size_t num_all_data,
    const std::vector<data_size_t>& used_data_indices);
  /*! \brief Set labels for all records */
  void SetLabel(const label_t* label, data_size_t len);
  /*! \brief Set per-record weights */
  void SetWeights(const label_t* weights, data_size_t len);
  /*! \brief Set query ids for all records */
  void SetQuery(const data_size_t* query, data_size_t len);
  /*!
   * \brief Set initial scores
   * \param init_score Initial scores, this class will manage memory for init_score.
   */
  void SetInitScore(const double* init_score, data_size_t len);
  /*!
   * \brief Save binary data to file
   * \param writer Writer of the target file
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;
  /*!
   * \brief Get size in bytes of this object
   */
  size_t SizesInByte() const;
  /*!
   * \brief Get pointer of label
   * \return Pointer of label
   */
  inline const label_t* label() const { return label_.data(); }
  /*!
   * \brief Set label for one record
   * \param idx Index of this record
   * \param value Label value of this record
   */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }
  /*!
   * \brief Set weight for one record
   * \param idx Index of this record
   * \param value Weight value of this record
   */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }
  /*!
   * \brief Set query id for one record
   * \param idx Index of this record
   * \param value Query id value of this record
   */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    queries_[idx] = static_cast<data_size_t>(value);
  }
  /*!
   * \brief Get weights; if they don't exist, will return nullptr
   * \return Pointer of weights
   */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get data boundaries on queries; if they don't exist, will return nullptr.
   *        We assume data is ordered by query:
   *        the interval [query_boundaries[i], query_boundaries[i+1])
   *        holds the data indices for query i.
   * \return Pointer of data boundaries on queries
   */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get number of queries
   * \return Number of queries
   */
  inline data_size_t num_queries() const { return num_queries_; }
  /*!
   * \brief Get weights for queries; if they don't exist, will return nullptr
   * \return Pointer of weights for queries
   */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get initial scores; if they don't exist, will return nullptr
   * \return Pointer of initial scores
   */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }
  /*!
   * \brief Get size of initial scores
   */
  inline int64_t num_init_score() const { return num_init_score_; }
  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct init-score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief mutex for thread-safe calls */
  std::mutex mutex_;
  /*! \brief Whether weights were loaded from a side file */
  bool weight_load_from_file_;
  /*! \brief Whether query boundaries were loaded from a side file */
  bool query_load_from_file_;
  /*! \brief Whether initial scores were loaded from a side file */
  bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}
  /*!
   * \brief Parse one line with label
   * \param str One line record, string format, should end with '\0'
   * \param out_features Output columns, stored as (column_idx, value) pairs
   * \param out_label Label will be stored here if it exists
   */
  virtual void ParseOneLine(const char* str,
    std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
  /*! \brief Total number of columns of the input data */
  virtual int TotalColumns() const = 0;
  /*!
   * \brief Create an object of parser, will auto-choose the format depending on the file
   * \param filename One filename of data
   * \param header Whether the data file has a header line
   * \param num_features Pass num_features of this data file if you know, <=0 means don't know
   * \param label_idx Index of label column
   * \return Object of parser
   */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
 *  which is used for training or validation
 */
class Dataset {
 public:
  friend DatasetLoader;
  /*! \brief Null constructor */
  LIGHTGBM_EXPORT Dataset();
  /*! \brief Constructor with a known number of data rows */
  LIGHTGBM_EXPORT Dataset(data_size_t num_data);
  /*! \brief Build the feature groups from per-feature bin mappers and sampled data */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
    int** sample_non_zero_indices,
    const int* num_per_col,
    size_t total_sample_cnt,
    const Config& io_config);
  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();
  /*! \brief Check whether another dataset has identical feature definitions and binning */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }
  /*! \brief Push one dense row (from thread tid) into the feature group buffers; no-op after FinishLoad() */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }
  /*! \brief Push one sparse row given as (column_idx, value) pairs; no-op after FinishLoad() */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }
  /*! \brief Push one value for an already-resolved (group, sub_feature) position */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }
  /*! \brief Map inner (used) feature index back to the original column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }
  /*! \brief Map original column index to inner feature index; negative if the column is unused */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }
  /*! \brief Group index of a used feature */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }
  /*! \brief Sub-feature index of a used feature inside its group.
   *  NOTE(review): misspelled name ("Feture") kept for API compatibility. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }
  /*! \brief First (cumulative) bin index of the given group */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }
  /*! \brief Total number of bins over all groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }
  /*! \brief Original column indices that map to a used feature */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }
  /* LGBM_CUDA void ReSize(data_size_t num_data); */
  // LGBM_CUDA ReSize() returns true if resized
  bool ReSize(data_size_t num_data);
  /*! \brief Copy the rows at used_indices from fullset into this dataset */
  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
  LIGHTGBM_EXPORT void FinishLoad();
  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
  LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
  /*!
   * \brief Save current dataset into binary file, will save to "filename.bin"
   */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
  LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
    const data_size_t* data_indices, data_size_t num_data,
    int leaf_idx,
    std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
    const score_t* gradients, const score_t* hessians,
    score_t* ordered_gradients, score_t* ordered_hessians,
    bool is_constant_hessian,
    HistogramBinEntry* histogram_data) const;
  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
    HistogramBinEntry* data) const;
  /*! \brief Split data indices of one leaf by a threshold on the given feature */
  inline data_size_t Split(int feature,
    const uint32_t* threshold, int num_threshold, bool default_left,
    data_size_t* data_indices, data_size_t num_data,
    data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }
  /*! \brief Bin offset of feature i inside its group: 1 for the first sub-feature, 0 otherwise */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }
  /*! \brief Number of bins of feature i */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }
  /*! \brief Monotone constraint of feature i; 0 when no constraints are configured */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }
  /*! \brief Penalty of feature i; 1 when no penalties are configured.
   *  NOTE(review): misspelled name ("Penalte") kept for API compatibility. */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }
  /*! \brief Whether any feature has a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }
  /*! \brief Total number of bins of one feature group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }
  /*! \brief Bin mapper of feature i */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }
  /*! \brief Bin data of the group that contains feature i */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }
  /*! \brief Bin data of one feature group */
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }
  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }
  /*! \brief Iterator over the bins of feature i */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }
  /*! \brief Iterator over the bins of one feature group */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }
  // LGBM_CUDA
  inline size_t FeatureGroupSizesInByte(int group) const {
    return feature_groups_[group]->FeatureGroupSizesInByte();
  }
  // LGBM_CUDA
  inline void* FeatureGroupData(int group) const {
    return feature_groups_[group]->FeatureGroupData();
  }
  /*! \brief Convert a bin threshold of feature i back to a real feature value */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }
  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }
  /*! \brief Create one OrderedBin per feature group, in parallel */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }
  /*!
   * \brief Get meta data pointer
   * \return Pointer of meta data
   */
  inline const Metadata& metadata() const { return metadata_; }
  /*! \brief Get number of used features */
  inline int num_features() const { return num_features_; }
  /*! \brief Get number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}
  /*! \brief Get number of total features */
  inline int num_total_features() const { return num_total_features_; }
  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }
  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }
  /*! \brief Set feature names, replacing whitespaces with underscores */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }
  /*! \brief Bin info string per original column ("none" for unused columns) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }
  void ResetConfig(const char* parameters);
  /*! \brief Get number of data */
  inline data_size_t num_data() const { return num_data_; }
  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;
  /*! \brief Append the features of another dataset to this one */
  void addFeaturesFrom(Dataset* other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index */
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features */
  int num_features_;
  /*! \brief Number of total features */
  int num_total_features_;
  /*! \brief Number of total data */
  data_size_t num_data_;
  /*! \brief Store some label level data */
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief token used to identify the binary file format */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Mapper from inner feature index to original column index */
  std::vector<int> real_feature_idx_;
  /*! \brief Feature index -> group index */
  std::vector<int> feature2group_;
  /*! \brief Feature index -> sub-feature index inside its group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries over the groups */
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  /*! \brief Per-feature monotone constraints (empty when unused) */
  std::vector<int8_t> monotone_types_;
  /*! \brief Per-feature penalties (empty when unused) */
  std::vector<double> feature_penalty_;
  bool is_finish_load_;
  int max_bin_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
};
} // namespace LightGBM
#endif   // LIGHTGBM_DATASET_H_
|
sip_fmt_plug.c | /* SIP cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* Copyright (C) 2007 Martin J. Muench <mjm@codito.de>
* SIP digest authentication password (hash) cracker
* See doc/SIPcrack-LICENSE */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sip);
#else
#include "md5.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sip_fmt_plug.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// Tuned on core i7 quad HT
// 1 4963K
// 16 8486K
// 32 8730K ** this was chosen.
// 64 8791k
// 128 8908k
#ifndef OMP_SCALE
#define OMP_SCALE 32
#endif
#endif
#include "memdbg.h"
/*
 * Per-hash salt.  get_salt() preloads ctx_dyna_data with "user:realm:" so
 * that crypt_all() only has to append the candidate password, and stores
 * the remaining static portion of the digest input in static_hash_data.
 */
typedef struct sip_salt_t {
	int static_hash_data_len;                  /* bytes used in static_hash_data */
	MD5_CTX ctx_dyna_data;                     /* MD5 state preloaded with "user:realm:" */
	char static_hash_data[STATIC_HASH_SIZE+1]; /* static tail of the digest input; built in get_salt() */
} sip_salt;

/* Salt currently selected for crypt_all() */
static sip_salt *pSalt;
#define FORMAT_LABEL "SIP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(sip_salt)
#define BINARY_ALIGN 4
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
static struct fmt_tests sip_tests[] = {
{"$sip$*192.168.1.111*192.168.1.104*200*asterisk*REGISTER*sip*192.168.1.104**46cce857****MD5*4dfc7515936a667565228dbaa0293dfc", "123456"},
{"$sip$*10.0.1.20*10.0.1.10*1001*asterisk*REGISTER*sips*10.0.1.20*5061*0ef95b07****MD5*576e39e9de6a9ed053eb218f65fe470e", "q1XCLF0KaBObo797"},
// generated with pass_gen.pl
{"$sip$*192.168.163.238*192.168.163.239*50894*asterisk*REGISTER*sip*192.168.163.239**303535c9****MD5*e32c95d6ad0fecbc3967b7534d7b5b3b", "123456"},
{"$sip$*192.168.196.105*192.168.196.192*81670*asterisk*REGISTER*sip*192.168.196.192**747f072a****MD5*d15c84b1bdc2155db12b721d7fb9445b", "password"},
{"$sip$*192.168.119.6*192.168.119.154*65790*asterisk*REGISTER*sip*192.168.119.154**8d4e1a4b****MD5*dcc0d8a4c105dbf3ecf5b281f4c57356", "happy123"},
{"$sip$*192.168.113.63*192.168.113.78*59810*asterisk*REGISTER*sip*192.168.113.78**b778256e****MD5*cb13933a5986df471265231d08206509", "aobdataeteag"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/sizeof(ARCH_WORD_32)];
static char bin2hex_table[256][2]; /* table for bin<->hex mapping */
/*
 * Format init: scale the keys-per-crypt counts for OpenMP and allocate
 * the per-candidate key and hash buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* min gets one key per thread; max additionally gets OMP_SCALE keys
	   per thread to amortize the fork/join overhead (order matters:
	   omp_t is reused after being scaled) */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* Init bin 2 hex table for faster conversions later */
	init_bin2hex(bin2hex_table);
	/* zero-initialized key/result buffers, freed in done() */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}
/* Release the per-candidate buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/*
 * Validate a "$sip$*..." ciphertext line.  Returns 1 if the line is a
 * well-formed SIP challenge/response entry, 0 otherwise.  The format has
 * 14 '*'-separated fields after the "$sip$*" tag:
 *   server*client*user*realm*method*uri1*uri2*uri3*nonce*cnonce*
 *   nonce_count*qop*algorithm*hash
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext, *q;
	int i,res = 0;
	if (strncmp(ciphertext, "$sip$*", 6))
		return 0;
	if (strlen(ciphertext) > 2048) // sizeof(saltBuf) in get_salt
		return 0;
	/* count field separators; the format carries exactly 14 */
	for(i = 0; i < strlen(ciphertext); i++)
		if(ciphertext[i] == '*')
			res++;
	if(res != 14)
		goto err;
	res = 0;
	p += 6;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > HOST_MAXLEN) /* host (server) */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > HOST_MAXLEN) /* host (client) */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > USER_MAXLEN) /* user */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > HOST_MAXLEN) /* realm */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > METHOD_MAXLEN) /* method */
		goto err;
	p = q + 1;
	/* uri stuff: the three uri fields are length-checked together */
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	res += q - p;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	res += q - p;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	res += q - p;
	if (res > URI_MAXLEN) /* uri */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > NONCE_MAXLEN) /* nonce */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	/* NOTE(review): cnonce checked against NONCE_MAXLEN and nonce_count
	   against CNONCE_MAXLEN below -- the two limits look swapped; confirm
	   against the MAXLEN definitions before changing */
	if ((q - p) > NONCE_MAXLEN) /* cnonce */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > CNONCE_MAXLEN) /* nonce_count */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > QOP_MAXLEN) /* qop */
		goto err;
	/* NOTE(review): p is deliberately NOT advanced here, so this check
	   re-measures the qop field against ALG_MAXLEN; the algorithm field
	   itself is instead pinned by the strncmp("MD5*", ...) below, which
	   spans the field and its '*' terminator.  Adding the "missing"
	   p = q + 1 here would break that strncmp. */
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if ((q - p) > ALG_MAXLEN) /* algorithm */
		goto err;
	p = q + 1;
	if ((q = strchr(p, '*')) == NULL)
		goto err;
	if (strncmp("MD5*", p, 4)) /* only MD5 is supported */
		goto err;
	p = q + 1;
	if (strspn(p, HEXCHARS_lc) != MD5_LEN_HEX) /* hash */
		goto err;
	return 1;
err:
	return 0;
}
/* Parse a validated "$sip$*..." line into the static sip_salt.
 * Precomputes as much MD5 work as possible so crypt_all() only has to
 * append the candidate password:
 *  - static_hash = MD5("METHOD:URI") rendered as lowercase hex;
 *  - ctx_dyna_data = an MD5_CTX already fed with "USER:REALM:";
 *  - static_hash_data = the ":...:<static_hash>" tail of the digest input.
 * Returns a pointer to the static salt (overwritten on the next call). */
static void *get_salt(char *ciphertext)
{
	static sip_salt salt;
	char saltBuf[2048]; /* large enough: valid() rejects lines >= 2048 chars */
	char *lines[16];
	login_t login;
	int num_lines;
	MD5_CTX md5_ctx;
	unsigned char md5_bin_hash[MD5_LEN];
	char static_hash[MD5_LEN_HEX+1];
	char *saltcopy = saltBuf;
	memset(&salt, 0, sizeof(salt));
	strcpy(saltBuf, ciphertext); /* work on a copy: stringtoarray mutates it */
	saltcopy += 6; /* skip over "$sip$*" */
	memset(&login, 0, sizeof(login_t));
	num_lines = stringtoarray(lines, saltcopy, '*');
	assert(num_lines == 14); /* guaranteed by valid() (14 separators) */
	strncpy(login.server, lines[0], sizeof(login.server) - 1 );
	strncpy(login.client, lines[1], sizeof(login.client) - 1 );
	strncpy(login.user, lines[2], sizeof(login.user) - 1 );
	strncpy(login.realm, lines[3], sizeof(login.realm) - 1 );
	strncpy(login.method, lines[4], sizeof(login.method) - 1 );
	/* special handling for uri: fields 5..7 are joined with ':',
	 * an empty third part is omitted.
	 * NOTE(review): sprintf is unbounded here; valid() caps the combined
	 * uri length at URI_MAXLEN -- confirm login.uri is sized accordingly. */
	if (!strcmp(lines[7], ""))
		sprintf(login.uri, "%s:%s", lines[5], lines[6]);
	else
		sprintf(login.uri, "%s:%s:%s", lines[5], lines[6], lines[7]);
	strncpy(login.nonce, lines[8], sizeof(login.nonce) - 1 );
	strncpy(login.cnonce, lines[9], sizeof(login.cnonce) - 1 );
	strncpy(login.nonce_count, lines[10], sizeof(login.nonce_count) - 1 );
	strncpy(login.qop, lines[11], sizeof(login.qop) - 1 );
	strncpy(login.algorithm, lines[12], sizeof(login.algorithm) - 1 );
	strncpy(login.hash, lines[13], sizeof(login.hash) - 1 );
	if(strncmp(login.algorithm, "MD5", strlen(login.algorithm))) {
		printf("\n* Cannot crack '%s' hash, only MD5 supported so far...\n", login.algorithm);
		error();
	}
	/* Generating MD5 static hash: 'METHOD:URI' */
	MD5_Init(&md5_ctx);
	MD5_Update(&md5_ctx, (unsigned char*)login.method, strlen( login.method ));
	MD5_Update(&md5_ctx, (unsigned char*)":", 1);
	MD5_Update(&md5_ctx, (unsigned char*)login.uri, strlen( login.uri ));
	MD5_Final(md5_bin_hash, &md5_ctx);
	bin_to_hex(bin2hex_table, md5_bin_hash, MD5_LEN, static_hash, MD5_LEN_HEX);
	/* Constructing first part of dynamic hash: 'USER:REALM:' */
	MD5_Init(&md5_ctx);
	MD5_Update(&md5_ctx, login.user, strlen(login.user));
	MD5_Update(&md5_ctx, ":", 1);
	MD5_Update(&md5_ctx, login.realm, strlen(login.realm));
	MD5_Update(&md5_ctx, ":", 1);
	/* store the half-fed context; crypt_all() resumes from it per password */
	memcpy(&(salt.ctx_dyna_data), &md5_ctx, sizeof(md5_ctx));
	// we now construct the MD5_CTX with this data loaded. Thus we no longer store this buffer.
	//snprintf(salt.dynamic_hash_data, DYNAMIC_HASH_SIZE, "%s:%s:", login.user, login.realm);
	//salt.dynamic_hash_data_len = strlen(salt.dynamic_hash_data);
	/* Construct last part of final hash data: ':NONCE(:CNONCE:NONCE_COUNT:QOP):<static_hash>' */
	/* no qop */
	if(!strlen(login.qop))
		snprintf(salt.static_hash_data, STATIC_HASH_SIZE, ":%s:%s", login.nonce, static_hash);
	/* qop/conce/cnonce_count */
	else
		snprintf(salt.static_hash_data, STATIC_HASH_SIZE, ":%s:%s:%s:%s:%s",
		         login.nonce, login.nonce_count, login.cnonce,
		         login.qop, static_hash);
	/* Get lens of static buffers */
	salt.static_hash_data_len = strlen(salt.static_hash_data);
	/* Begin brute force attack */
#ifdef SIP_DEBUG
	printf("Starting bruteforce against user '%s' (%s: '%s')\n",
	       login.user, login.algorithm, login.hash);
#endif
	return &salt;
}
/* Install the salt chosen by the cracker core; crypt_all() reads pSalt. */
static void set_salt(void *salt)
{
	pSalt = (sip_salt*)salt;
}
/* Decode the trailing hex digest of a ciphertext line into raw bytes.
 * Returns a pointer to a static, lazily-allocated BINARY_SIZE buffer
 * (overwritten on every call). */
static void * get_binary(char *ciphertext) {
	static char *binary;
	char *pos;
	int byte_idx;

	if (!binary)
		binary = (char*)mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
	/* the hash is everything after the last '*' */
	pos = strrchr(ciphertext, '*') + 1;
	for (byte_idx = 0; byte_idx < BINARY_SIZE; byte_idx++, pos += 2) {
		int hi = atoi16[ARCH_INDEX(pos[0])];
		int lo = atoi16[ARCH_INDEX(pos[1])];
		binary[byte_idx] = (hi << 4) | lo;
	}
	return (void *)binary;
}
/* Compute the SIP digest response for each queued candidate password.
 * Per RFC 2617 digest auth: response = MD5(HA1 : tail), where
 * HA1 = MD5("user:realm:password") and the tail (nonce[/cnonce/nc/qop]
 * plus MD5("method:uri")) was precomputed by get_salt().
 * Returns the number of candidates processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		/* password */
		MD5_CTX md5_ctx;
		unsigned char md5_bin_hash[MD5_LEN];
		char dynamic_hash[MD5_LEN_HEX+1];
		/* Generate dynamic hash including pw (see above) */
		//MD5_Init(&md5_ctx);
		//MD5_Update(&md5_ctx, (unsigned char*)pSalt->dynamic_hash_data, pSalt->dynamic_hash_data_len);
		// salt.ctx_dyna_data contains the ctx already loaded ("user:realm:"),
		// so only the password still has to be hashed in:
		memcpy(&md5_ctx, &(pSalt->ctx_dyna_data), sizeof(md5_ctx));
		MD5_Update(&md5_ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]));
		MD5_Final(md5_bin_hash, &md5_ctx);
		bin_to_hex(bin2hex_table, md5_bin_hash, MD5_LEN, dynamic_hash, MD5_LEN_HEX);
		/* Generate digest response hash: MD5(HA1-hex ++ static tail) */
		MD5_Init(&md5_ctx);
		MD5_Update(&md5_ctx, (unsigned char*)dynamic_hash, MD5_LEN_HEX);
		MD5_Update(&md5_ctx, (unsigned char*)pSalt->static_hash_data, pSalt->static_hash_data_len);
		MD5_Final((unsigned char*)crypt_key[index], &md5_ctx);
	}
	return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if ( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)&(crypt_key[index][0]))[0] )
return 1;
return 0;
}
/* Full BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_key[index], BINARY_SIZE) == 0;
}
/* Nothing beyond cmp_one() is needed: the whole digest was compared. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void sip_set_key(char *key, int index)
{
int saved_len = strlen(key);
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the candidate password previously stored by sip_set_key(). */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Bucket hashes over the first word of each computed digest.
 * NOTE(review): these mask crypt_key[index][0] directly; that is only
 * correct if a crypt_key element is a 32-bit word (PH_MASK_3..6 exceed
 * a byte). cmp_all() casts through ARCH_WORD_32* -- verify the
 * declaration of crypt_key matches this assumption. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
/* John the Ripper format descriptor: parameters block followed by the
 * method table wiring the functions above into the cracker core. */
struct fmt_main fmt_sip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP, /* case-sensitive, 8-bit, OpenMP */
		{ NULL },
		sip_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sip_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
version1_1.c | // Compile with:
//
//
// To specify the number of bodies in the world, the program optionally accepts
// an integer as its first command line argument.
#include <time.h>
#include <sys/times.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <X11/Xlib.h>
#include <unistd.h>
#include "omp.h"
#define WIDTH 1024
#define HEIGHT 768
// default number of bodies
#define DEF_NUM_BODIES 200
// gravitational constant
#define GRAV 10.0
// initial velocities are scaled by this value
#define V_SCALAR 20.0
// initial masses are scaled by this value
#define M_SCALAR 5.0
// radius scalar
#define R_SCALAR 3
// coefficient of restitution determines the elasticity of a collision: C_REST = [0,1]
// if C_REST = 0 -> perfectly inelastic (particles stick together)
// if C_REST = 1 -> perfectly elastic (no loss of speed)
#define C_REST 0.5
// set the iteration times
#define iteration_times 100
// Must set 0 if run on Pi
#define NOT_RUN_ON_PI 1
/* One particle of the simulation. */
struct body {
	double x, y;   // position
	double vx, vy; // velocity
	double m;      // mass
	double r;      // radius of the particle
};
/* The whole simulated universe: a flat array of bodies. */
struct world {
	struct body *bodies;
	int num_bodies;
};
/* Accumulated clock ticks spent inside position_step() (see step_world). */
clock_t total_time = 0;
//total_time.sec = 0;
//total_time.usec = 0;
/* This function initializes each particle's mass, velocity and position */
/* Build a world of num_bodies particles, placed by rejection sampling
 * inside the largest screen-centred circle. Each particle gets a
 * tangential initial velocity (scaled by V_SCALAR), a mass inversely
 * proportional to a random draw, and a radius derived from that mass. */
struct world *create_world(int num_bodies) {
    struct world *w = malloc(sizeof(struct world));
    int placed = 0;
    int min_dim = (WIDTH < HEIGHT) ? WIDTH : HEIGHT;

    w->num_bodies = num_bodies;
    w->bodies = malloc(sizeof(struct body) * num_bodies);
    while (placed < num_bodies) {
        double px = drand48() * WIDTH;
        double py = drand48() * HEIGHT;
        double dist = sqrt((WIDTH / 2 - px) * (WIDTH / 2 - px) +
                           (py - HEIGHT / 2) * (py - HEIGHT / 2));
        if (dist > min_dim / 2)
            continue; /* outside the circle: draw a new position */
        w->bodies[placed].x = px;
        w->bodies[placed].y = py;
        w->bodies[placed].vx = V_SCALAR * (py - HEIGHT / 2) / dist;
        w->bodies[placed].vy = V_SCALAR * (WIDTH / 2 - px) / dist;
        w->bodies[placed].m = (1 / (0.025 + drand48())) * M_SCALAR;
        w->bodies[placed].r = sqrt(w->bodies[placed].m / M_PI) * R_SCALAR;
        placed++;
    }
    return w;
}
// set the foreground color given RGB values between 0..255.
void set_color(Display *disp, GC gc, int r, int g, int b) {
unsigned long int p;
if (r < 0) r = 0; else if (r > 255) r = 255;
if (g < 0) g = 0; else if (g > 255) g = 255;
if (b < 0) b = 0; else if (b > 255) b = 255;
p = (r << 16) | (g << 8) | (b);
XSetForeground(disp, gc, p);
}
/* This function updates the screen with the new positions of each particle */
void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) {
int i;
double x, y, r, r2;
// we turn off aliasing for faster draws
set_color(disp, gc, 255, 255, 255);
XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT);
for (i = 0; i < world->num_bodies; i++) {
r = world->bodies[i].r;
x = world->bodies[i].x - r;
y = world->bodies[i].y - r;
r2 = r + r;
// draw body
set_color(disp, gc, 255 * 7 / 10, 255 * 7 / 10, 255 * 7 / 10);
XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64);
set_color(disp, gc, 0, 0, 0);
XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64);
}
}
/* Keep every body on screen: when a body crosses an edge while moving
 * outward, reflect that velocity component (damped by C_REST) and snap
 * the body back inside. Note: X11 screen coordinates put y = 0 at the
 * TOP of the window and y grows downward, so the y < 0 branch is the
 * top edge (the old comments had top/bottom swapped). */
void collision_step(struct world *world) {
    int a, b;
    double r, x, y, vx, vy;
    // Impose screen boundaries by reversing direction if body is off screen
    for (a = 0; a < world->num_bodies; a++) {
        r = world->bodies[a].r;
        x = world->bodies[a].x;
        y = world->bodies[a].y;
        vx = world->bodies[a].vx;
        vy = world->bodies[a].vy;
        if (x - r < 0) { // left edge
            if (vx < 0) { world->bodies[a].vx = -C_REST * vx; }
            world->bodies[a].x = r;
        } else if (x + r > WIDTH) { // right edge
            if (vx > 0) { world->bodies[a].vx = -C_REST * vx; }
            world->bodies[a].x = WIDTH - r;
        }
        if (y - r < 0) { // top edge (y = 0 is the top in X11 coordinates)
            if (vy < 0) { world->bodies[a].vy = -C_REST * vy; }
            world->bodies[a].y = r;
        } else if (y + r > HEIGHT) { // bottom edge
            if (vy > 0) { world->bodies[a].vy = -C_REST * vy; }
            world->bodies[a].y = HEIGHT - r;
        }
    }
}
/* Advance the simulation by one time step of length time_res:
 * (1) accumulate pairwise gravitational forces, (2) integrate velocity
 * and position (explicit Euler).
 * Fixes vs. previous revision:
 *  - force_x/force_y are now free()d -- they leaked on every call;
 *  - dropped the orphaned "#pragma omp barrier" (the parallel for ends
 *    with an implicit barrier; a bare barrier outside a parallel region
 *    has no effect);
 *  - memset's return value is no longer pointlessly reassigned. */
void position_step(struct world *world, double time_res) {
    /* force_x[i]/force_y[i] hold the components of the net force on body i */
    double *force_x = (double *) malloc(sizeof(double) * world->num_bodies);
    double *force_y = (double *) malloc(sizeof(double) * world->num_bodies);
    memset(force_x, 0, sizeof(double) * world->num_bodies);
    memset(force_y, 0, sizeof(double) * world->num_bodies);
    /* Compute the net force on each body. Each iteration i writes only
     * force_{x,y}[i] and reads shared positions, so the loop is race-free. */
    #pragma omp parallel for
    for (int i = 0; i < world->num_bodies; i++) {
        double d, d_cubed, diff_x, diff_y;
        for (int j = 0; j < world->num_bodies; j++) {
            if (i == j) {
                continue;
            }
            // Compute the x and y distances and total distance d between
            // bodies i and j
            diff_x = world->bodies[j].x - world->bodies[i].x;
            diff_y = world->bodies[j].y - world->bodies[i].y;
            d = sqrt((diff_x * diff_x) + (diff_y * diff_y));
            if (d < 25) { /* softening: clamp distance so close pairs don't explode */
                d = 25;
            }
            d_cubed = d * d * d;
            // Add force due to j to total force on i (d^3 because diff_x/diff_y
            // still carry one factor of d that normalizes the direction)
            force_x[i] += GRAV * (world->bodies[i].m * world->bodies[j].m
                                  / d_cubed) * diff_x;
            force_y[i] += GRAV * (world->bodies[i].m * world->bodies[j].m
                                  / d_cubed) * diff_y;
        }
    }
    // Update the velocity and position of each body
    #pragma omp parallel for
    for (int i = 0; i < world->num_bodies; i++) {
        // Update velocities (a = F/m)
        world->bodies[i].vx += force_x[i] * time_res / world->bodies[i].m;
        world->bodies[i].vy += force_y[i] * time_res / world->bodies[i].m;
        // Update positions
        world->bodies[i].x += world->bodies[i].vx * time_res;
        world->bodies[i].y += world->bodies[i].vy * time_res;
    }
    /* fix: these scratch arrays used to leak on every step */
    free(force_x);
    free(force_y);
}
/* Run one full simulation step, timing only the force/integration part
 * (position_step) and accumulating it into the global total_time. */
void step_world(struct world *world, double time_res) {
    struct tms unused_tms;
    clock_t t0, t1;

    t0 = times(&unused_tms);
    position_step(world, time_res);
    t1 = times(&unused_tms);
    total_time += t1 - t0;
    collision_step(world);
}
/* Main method runs initialize() and update() */
int main(int argc, char **argv) {
//total_time.tv_sec = 0;
//total_time.tv_usec = 0;
/* get num bodies from the command line */
int num_bodies, threads;
num_bodies = DEF_NUM_BODIES;
threads = 1;
if (argc == 2) {
num_bodies = atoi(argv[1]);
};
int thread_list[13]={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40};
FILE* fstream=fopen("outdata","a+");
fprintf(fstream,"Universe has %d bodies\n", num_bodies);
for (int i = 0; i < 13; ++i) {
threads = thread_list[i];
printf("Universe has %d bodies. %d Threads\n", num_bodies, threads);
omp_set_num_threads(threads);
/* set up the universe */
time_t cur_time;
time(&cur_time);
srand48((long) cur_time); // seed the RNG used in create_world
struct world *world = create_world(num_bodies);
/* set up graphics using Xlib */
#if NOT_RUN_ON_PI
Display *disp = XOpenDisplay(NULL);
int scr = DefaultScreen(disp);
Window win = XCreateSimpleWindow(
disp,
RootWindow(disp, scr),
0, 0,
WIDTH, HEIGHT,
0,
BlackPixel(disp, scr), WhitePixel(disp, scr));
XStoreName(disp, win, "N-Body Simulator");
Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr),
WIDTH, HEIGHT, DefaultDepth(disp, scr));
GC gc = XCreateGC(disp, back_buf, 0, 0);
// Make sure we're only looking for messages about closing the window
Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0);
XSetWMProtocols(disp, win, &del_window, 1);
XSelectInput(disp, win, StructureNotifyMask);
XMapWindow(disp, win);
XEvent event;
// wait until window is mapped
while (1) {
XNextEvent(disp, &event);
if (event.type == MapNotify) {
break;
}
}
#endif
struct timespec delay = {0, 1000000000 / 60}; // for 60 FPS
struct timespec remaining;
double delta_t = 0.1;
int ii;
total_time=0;
for (ii = 0; ii < iteration_times; ii++) {
// check if the window has been closed
#if NOT_RUN_ON_PI
if (XCheckTypedEvent(disp, ClientMessage, &event)) {
break;
}
// we first draw to the back buffer then copy it to the front (`win`)
draw_world(disp, back_buf, gc, world);
XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0);
#endif
step_world(world, delta_t);
//if you want to watch the process in 60 FPS
//nanosleep(&delay, &remaining);
}
// printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000);
fprintf(fstream,"%d %lfs\n", threads,(double) total_time / (sysconf(_SC_CLK_TCK)));
#if NOT_RUN_ON_PI
XFreeGC(disp, gc);
XFreePixmap(disp, back_buf);
XDestroyWindow(disp, win);
XCloseDisplay(disp);
#endif
}
fclose(fstream);
return 0;
}
|
Cycle.c | /*
* The MIT License
*
* Copyright 2020 The OpenNARS authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "Cycle.h"
//doing inference within the matched concept, returning whether decisionMaking should continue
//doing inference within the matched concept, returning whether decisionMaking should continue
//Belief events are stored as the concept's belief spike; goal events are
//handed to Decision_Suggest, whose result (possibly empty) is returned.
static Decision Cycle_ActivateSensorimotorConcept(Concept *c, Event *e, long currentTime)
{
    Decision decision = {0};
    if(e->truth.confidence > MIN_CONFIDENCE) //ignore events below the confidence floor
    {
        c->usage = Usage_use(c->usage, currentTime, false); //touch the concept so it is kept alive
        //add event as spike to the concept:
        if(e->type == EVENT_TYPE_BELIEF)
        {
            c->belief_spike = *e;
        }
        else
        {
            //pass spike if the concept doesn't have a satisfying motor command
            decision = Decision_Suggest(c, e, currentTime);
        }
    }
    return decision;
}
//Process an event, by creating a concept, or activating an existing
static Decision Cycle_ProcessSensorimotorEvent(Event *e, long currentTime)
{
Decision best_decision = {0};
//add a new concept for e if not yet existing
Memory_Conceptualize(&e->term, currentTime);
e->processed = true;
Event_SetTerm(e, e->term); // TODO make sure that hash needs to be calculated once instead already
IN_DEBUG( puts("Event was selected:"); Event_Print(e); )
//determine the concept it is related to
bool e_hasVariable = Variable_hasVariable(&e->term, true, true, true);
#pragma omp parallel for
for(int concept_i=0; concept_i<concepts.itemsAmount; concept_i++)
{
Event ecp = *e;
Concept *c = concepts.items[concept_i].address;
if(!e_hasVariable) //concept matched to the event which doesn't have variables
{
Substitution subs = Variable_Unify(&c->term, &e->term); //concept with variables,
if(subs.success)
{
ecp.term = e->term;
Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
{
best_decision = decision;
}
}
}
else
{
Substitution subs = Variable_Unify(&e->term, &c->term); //event with variable matched to concept
if(subs.success)
{
bool success;
ecp.term = Variable_ApplySubstitute(e->term, subs, &success);
if(success)
{
Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
{
best_decision = decision;
}
}
}
}
}
return best_decision;
}
//Pop up to cnt highest-priority events from queue into selectionArray /
//selectionPriority; *selectedCnt receives how many were actually taken
//(the queue may hold fewer than cnt).
void Cycle_PopEvents(Event *selectionArray, double *selectionPriority, int *selectedCnt, PriorityQueue *queue, int cnt)
{
    int taken = 0;
    while(taken < cnt)
    {
        Event *popped;
        double prio = 0;
        if(!PriorityQueue_PopMax(queue, (void**) &popped, &prio))
        {
            assert(queue->itemsAmount == 0, "No item was popped, only acceptable reason is when it's empty");
            IN_DEBUG( puts("Selecting event failed, maybe there is no event left."); )
            break;
        }
        //copy the event out: the popped queue slot may be recycled while
        //the selection is still being processed
        selectionPriority[taken] = prio;
        selectionArray[taken] = *popped;
        taken++;
    }
    *selectedCnt = taken;
}
//Propagate subgoals, leading to decisions
static Decision Cycle_PropagateSubgoals(long currentTime)
{
Decision best_decision = {0};
//pass goal spikes on to the next
for(int i=0; i<goalsSelectedCnt; i++)
{
Event *goal = &selectedGoals[i];
IN_DEBUG( fputs("selected goal ", stdout); Narsese_PrintTerm(&goal->term); puts(""); )
Decision decision = Cycle_ProcessSensorimotorEvent(goal, currentTime);
if(decision.execute && decision.desire > best_decision.desire && (!best_decision.specialized || decision.specialized))
{
best_decision = decision;
}
#pragma omp parallel for
for(int concept_i=0; concept_i<concepts.itemsAmount; concept_i++)
{
Concept *c = concepts.items[concept_i].address;
if(Variable_Unify(&c->term, &goal->term).success) //could be <a --> M>! matching to some <... =/> <$1 --> M>>.
{
bool revised;
c->goal_spike = Inference_RevisionAndChoice(&c->goal_spike, goal, currentTime, &revised);
selectedGoals[i] = c->goal_spike;
for(int opi=0; opi<OPERATIONS_MAX; opi++)
{
for(int j=0; j<c->precondition_beliefs[opi].itemsAmount; j++)
{
Implication *imp = &c->precondition_beliefs[opi].array[j];
if(!Memory_ImplicationValid(imp))
{
Table_Remove(&c->precondition_beliefs[opi], j);
j--;
continue;
}
Event newGoal = Inference_GoalDeduction(&c->goal_spike, imp);
Event newGoalUpdated = Inference_EventUpdate(&newGoal, currentTime);
IN_DEBUG( fputs("derived goal ", stdout); Narsese_PrintTerm(&newGoalUpdated.term); puts(""); )
Memory_AddEvent(&newGoalUpdated, currentTime, selectedGoalsPriority[i] * Truth_Expectation(newGoalUpdated.truth), 0, false, true, false, false, false);
}
}
}
}
}
return best_decision;
}
//Reinforce link between concept a and b (creating it if non-existent)
static void Cycle_ReinforceLink(Event *a, Event *b)
{
if(a->type != EVENT_TYPE_BELIEF || b->type != EVENT_TYPE_BELIEF)
{
return;
}
Term a_term_nop = Narsese_GetPreconditionWithoutOp(&a->term);
Concept *A = Memory_FindConceptByTerm(&a_term_nop);
Concept *B = Memory_FindConceptByTerm(&b->term);
if(A != NULL && B != NULL && A != B)
{
//temporal induction
if(!Stamp_checkOverlap(&a->stamp, &b->stamp))
{
bool success;
Implication precondition_implication = Inference_BeliefInduction(a, b, &success);
if(success)
{
precondition_implication.sourceConcept = A;
precondition_implication.sourceConceptId = A->id;
if(precondition_implication.truth.confidence >= MIN_CONFIDENCE)
{
bool success;
Term general_implication_term = IntroduceImplicationVariables(precondition_implication.term, &success);
if(success && Variable_hasVariable(&general_implication_term, true, true, false))
{
NAL_DerivedEvent(general_implication_term, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0);
}
int operationID = Narsese_getOperationID(&a->term);
IN_DEBUG( fputs("Formed implication: ", stdout); Narsese_PrintTerm(&precondition_implication.term); Truth_Print(&precondition_implication.truth); puts("\n"); )
Implication *revised_precon = Table_AddAndRevise(&B->precondition_beliefs[operationID], &precondition_implication);
if(revised_precon != NULL)
{
revised_precon->creationTime = currentTime; //for evaluation
revised_precon->sourceConcept = A;
revised_precon->sourceConceptId = A->id;
/*IN_DEBUG( fputs("REVISED pre-condition implication: ", stdout); Implication_Print(revised_precon); )*/
Memory_printAddedImplication(&revised_precon->term, &revised_precon->truth, false, revised_precon->truth.confidence > precondition_implication.truth.confidence);
}
}
}
}
}
}
//Return the events selected this cycle to their queues, re-inserting
//each with the priority it was popped with.
void Cycle_PushEvents(long currentTime)
{
    int idx;
    for(idx=0; idx<beliefsSelectedCnt; idx++)
    {
        Memory_AddEvent(&selectedBeliefs[idx], currentTime, selectedBeliefsPriority[idx], 0, false, false, true, false, false);
    }
    for(idx=0; idx<goalsSelectedCnt; idx++)
    {
        Memory_AddEvent(&selectedGoals[idx], currentTime, selectedGoalsPriority[idx], 0, false, false, true, false, false);
    }
}
//Process the newest input belief event(s): activate/create concepts for
//each sequence length, then mine the belief FIFO for
//<(&/,precondition,op) =/> postcondition> temporal links.
void Cycle_ProcessInputBeliefEvents(long currentTime)
{
    //1. process newest event
    if(belief_events.itemsAmount > 0)
    {
        //form concepts for the sequences of different length
        for(int len=0; len<MAX_SEQUENCE_LEN; len++)
        {
            Event *toProcess = FIFO_GetNewestSequence(&belief_events, len);
            if(toProcess != NULL && !toProcess->processed && toProcess->type != EVENT_TYPE_DELETED)
            {
                assert(toProcess->type == EVENT_TYPE_BELIEF, "A different event type made it into belief events!");
                Cycle_ProcessSensorimotorEvent(toProcess, currentTime);
                Event postcondition = *toProcess;
                //Mine for <(&/,precondition,operation) =/> postcondition> patterns in the FIFO:
                if(len == 0) //postcondition always len1
                {
                    int op_id = Narsese_getOperationID(&postcondition.term);
                    Decision_AssumptionOfFailure(op_id, currentTime); //collection of negative evidence, new way
                    //build link between internal derivations and external event to explain it:
                    for(int k=0; k<beliefsSelectedCnt; k++)
                    {
                        //only strictly earlier events can be preconditions
                        if(selectedBeliefs[k].occurrenceTime < postcondition.occurrenceTime)
                        {
                            Cycle_ReinforceLink(&selectedBeliefs[k], &postcondition);
                        }
                    }
                    for(int k=1; k<belief_events.itemsAmount; k++)
                    {
                        for(int len2=0; len2<MAX_SEQUENCE_LEN; len2++)
                        {
                            Event *precondition = FIFO_GetKthNewestSequence(&belief_events, k, len2);
                            if(precondition != NULL && precondition->type != EVENT_TYPE_DELETED)
                            {
                                Term precond = Narsese_GetPreconditionWithoutOp(&precondition->term); //a or (&/,a,op)
                                for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
                                {
                                    if(Narsese_isOperator(precond.atoms[i]))
                                    {
                                        goto NoReinforce; //if there is an op in a, then a longer sequ has also, try different k
                                    }
                                }
                                Cycle_ReinforceLink(precondition, &postcondition);
                                NoReinforce:; //label target: skip link formation for this sequence
                            }
                        }
                    }
                }
            }
        }
    }
}
//Process the newest input goal event, falling back to subgoal
//propagation when no direct decision fires, and execute the winning
//operation (clearing queued goal events afterwards).
void Cycle_ProcessInputGoalEvents(long currentTime)
{
    //process goals
    Decision decision = {0};
    if(goal_events.itemsAmount > 0)
    {
        Event *goal = FIFO_GetNewestSequence(&goal_events, 0);
        if(!goal->processed && goal->type!=EVENT_TYPE_DELETED)
        {
            assert(goal->type == EVENT_TYPE_GOAL, "A different event type made it into goal events!");
            decision = Cycle_ProcessSensorimotorEvent(goal, currentTime);
        }
    }
    //allow reasoning into the future by propagating spikes from goals back to potential current events
    if(!decision.execute)
    {
        decision = Cycle_PropagateSubgoals(currentTime);
    }
    if(decision.execute && decision.operationID > 0)
    {
        Decision_Execute(&decision);
        //reset cycling goal events after execution to avoid "residue actions"
        PriorityQueue_RESET(&cycling_goal_events, cycling_goal_events.items, cycling_goal_events.maxElements);
    }
}
//Absolute difference helper for long timestamps.
//Fix: the code below previously used abs(), which takes int -- on LP64
//platforms that silently truncates long occurrence-time differences.
static long Cycle_LongAbs(long x)
{
    return x < 0 ? -x : x;
}
//Derive new knowledge: for each selected belief event, fire semantically
//or temporally related concepts above a self-adjusting priority
//threshold and run the NAL rule table / belief deduction on the pairs.
void Cycle_Inference(long currentTime)
{
    //Inferences
#if STAGE==2
    for(int i=0; i<beliefsSelectedCnt; i++)
    {
        long countConceptsMatched = 0;
        bool fired[CONCEPTS_MAX] = {0}; //whether a concept already fired
        for(;;)
        {
            long countConceptsMatchedNew = 0;
            //Adjust dynamic firing threshold: (proportional "self"-control)
            double conceptPriorityThresholdCurrent = conceptPriorityThreshold;
            //NOTE(review): assumes currentTime >= 1 -- confirm callers never pass 0
            long countConceptsMatchedAverage = Stats_countConceptsMatchedTotal / currentTime;
            double set_point = BELIEF_CONCEPT_MATCH_TARGET;
            double process_value = countConceptsMatchedAverage;
            double error = process_value - set_point;
            double increment = error*CONCEPT_THRESHOLD_ADAPTATION;
            conceptPriorityThreshold = MIN(1.0, MAX(0.0, conceptPriorityThreshold + increment));
            //IN_DEBUG( printf("conceptPriorityThreshold=%f\n", conceptPriorityThreshold); )
            Event *e = &selectedBeliefs[i];
            Term subterms_of_e[2] = {0}; //subterms up to level 1
            for(int j=0; j<2; j++)
            {
                subterms_of_e[j] = Term_ExtractSubterm(&e->term, j+1);
            }
            double priority = selectedBeliefsPriority[i];
            Term dummy_term = {0};
            Truth dummy_truth = {0};
            //single-premise inference on the event itself
            RuleTable_Apply(e->term, dummy_term, e->truth, dummy_truth, e->occurrenceTime, e->stamp, currentTime, priority, 1, false, NULL, 0);
            IN_DEBUG( puts("Event was selected:"); Event_Print(e); )
            //Main inference loop:
            #pragma omp parallel for
            for(int j=0; j<concepts.itemsAmount; j++)
            {
                Concept *c = concepts.items[j].address;
                long validation_cid = c->id; //allows for lockfree rule table application (only adding to memory is locked)
                if(fired[j] || c->priority < conceptPriorityThresholdCurrent)
                {
                    continue;
                }
                fired[j] = true;
                //first filter based on common term (semantic relationship)
                bool has_common_term = false;
                for(int k=0; k<2; k++)
                {
                    Term current = Term_ExtractSubterm(&c->term, k+1);
                    for(int h=0; h<2; h++)
                    {
                        if(current.atoms[0] != 0 && subterms_of_e[h].atoms[0] != 0)
                        {
                            if(Term_Equal(&current, &subterms_of_e[h]))
                            {
                                has_common_term = true;
                                goto PROCEED;
                            }
                        }
                    }
                }
                PROCEED:;
                //second filter based on precondition implication (temporal relationship)
                bool is_temporally_related = false;
                for(int k=0; k<c->precondition_beliefs[0].itemsAmount; k++)
                {
                    Implication imp = c->precondition_beliefs[0].array[k];
                    Term subject = Term_ExtractSubterm(&imp.term, 1);
                    if(Variable_Unify(&subject, &e->term).success)
                    {
                        is_temporally_related = true;
                        break;
                    }
                }
                if(has_common_term)
                {
                    #pragma omp critical(stats)
                    {
                        countConceptsMatchedNew++;
                        countConceptsMatched++;
                        Stats_countConceptsMatchedTotal++;
                    }
                }
                if(has_common_term && c->belief.type != EVENT_TYPE_DELETED)
                {
                    //use eternal belief as belief
                    Event* belief = &c->belief;
                    Event future_belief = c->predicted_belief;
                    //but if there is a predicted one in the event's window, use this one
                    if(e->occurrenceTime != OCCURRENCE_ETERNAL && future_belief.type != EVENT_TYPE_DELETED &&
                       Cycle_LongAbs(e->occurrenceTime - future_belief.occurrenceTime) < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger
                    {
                        future_belief.truth = Truth_Projection(future_belief.truth, future_belief.occurrenceTime, e->occurrenceTime);
                        future_belief.occurrenceTime = e->occurrenceTime;
                        belief = &future_belief;
                    }
                    //unless there is an actual belief which falls into the event's window
                    Event project_belief = c->belief_spike;
                    if(e->occurrenceTime != OCCURRENCE_ETERNAL && project_belief.type != EVENT_TYPE_DELETED &&
                       Cycle_LongAbs(e->occurrenceTime - project_belief.occurrenceTime) < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger
                    {
                        project_belief.truth = Truth_Projection(project_belief.truth, project_belief.occurrenceTime, e->occurrenceTime);
                        project_belief.occurrenceTime = e->occurrenceTime;
                        belief = &project_belief;
                    }
                    //Check for overlap and apply inference rules
                    if(!Stamp_checkOverlap(&e->stamp, &belief->stamp))
                    {
                        Stamp stamp = Stamp_make(&e->stamp, &belief->stamp);
                        if(PRINT_CONTROL_INFO)
                        {
                            fputs("Apply rule table on ", stdout);
                            Narsese_PrintTerm(&e->term);
                            printf(" Priority=%f\n", priority);
                            fputs(" and ", stdout);
                            Narsese_PrintTerm(&c->term);
                            puts("");
                        }
                        RuleTable_Apply(e->term, c->term, e->truth, belief->truth, e->occurrenceTime, stamp, currentTime, priority, c->priority, true, c, validation_cid);
                    }
                }
                if(is_temporally_related)
                {
                    for(int i=0; i<c->precondition_beliefs[0].itemsAmount; i++)
                    {
                        Implication *imp = &c->precondition_beliefs[0].array[i];
                        assert(Narsese_copulaEquals(imp->term.atoms[0],'$'), "Not a valid implication term!");
                        Term precondition_with_op = Term_ExtractSubterm(&imp->term, 1);
                        Term precondition = Narsese_GetPreconditionWithoutOp(&precondition_with_op);
                        Substitution subs = Variable_Unify(&precondition, &e->term);
                        if(subs.success)
                        {
                            Implication updated_imp = *imp;
                            bool success;
                            updated_imp.term = Variable_ApplySubstitute(updated_imp.term, subs, &success);
                            if(success)
                            {
                                //forward deduction: predict the implication's postcondition
                                Event predicted = Inference_BeliefDeduction(e, &updated_imp);
                                NAL_DerivedEvent(predicted.term, predicted.occurrenceTime, predicted.truth, predicted.stamp, currentTime, priority, Truth_Expectation(imp->truth), 0, c, validation_cid);
                            }
                        }
                    }
                }
            }
            if(countConceptsMatched > Stats_countConceptsMatchedMax)
            {
                Stats_countConceptsMatchedMax = countConceptsMatched;
            }
            //stop once enough concepts matched, or no new ones can
            if(countConceptsMatched >= BELIEF_CONCEPT_MATCH_TARGET || countConceptsMatchedNew == 0)
            {
                break;
            }
        }
    }
#endif
}
//Apply relative forgetting: decay event and concept priorities by their
//durability factors, refresh usage of user-knowledge concepts so they
//are never forgotten, and rebuild the priority queues afterwards.
void Cycle_RelativeForgetting(long currentTime)
{
    //Apply event forgetting:
    for(int i=0; i<cycling_belief_events.itemsAmount; i++)
    {
        cycling_belief_events.items[i].priority *= EVENT_DURABILITY;
    }
    for(int i=0; i<cycling_goal_events.itemsAmount; i++)
    {
        cycling_goal_events.items[i].priority *= EVENT_DURABILITY;
    }
    //Apply concept forgetting:
    for(int i=0; i<concepts.itemsAmount; i++)
    {
        Concept *c = concepts.items[i].address;
        c->priority *= CONCEPT_DURABILITY;
        concepts.items[i].priority = Usage_usefulness(c->usage, currentTime); //how concept memory is sorted by, by concept usefulness
    }
    //BEGIN SPECIAL HANDLING FOR USER KNOWLEDGE
    if(ontology_handling)
    {
        for(int i=0; i<concepts.itemsAmount; i++)
        {
            Concept *c = concepts.items[i].address;
            if(c->hasUserKnowledge)
            {
                c->usage = Usage_use(c->usage, currentTime, false); //user implication won't be forgotten
            }
        }
    }
    //END SPECIAL HANDLING FOR USER KNOWLEDGE
    //Re-sort queues
    PriorityQueue_Rebuild(&concepts);
    PriorityQueue_Rebuild(&cycling_belief_events);
    PriorityQueue_Rebuild(&cycling_goal_events);
}
//Run one full reasoning cycle at the given logical time (the six stages
//are described inline below).
void Cycle_Perform(long currentTime)
{
    Metric_send("NARNode.Cycle", 1); //telemetry: count performed cycles
    //1. Retrieve BELIEF/GOAL_EVENT_SELECTIONS events from cyclings events priority queue (which includes both input and derivations)
    Cycle_PopEvents(selectedGoals, selectedGoalsPriority, &goalsSelectedCnt, &cycling_goal_events, GOAL_EVENT_SELECTIONS);
    Cycle_PopEvents(selectedBeliefs, selectedBeliefsPriority, &beliefsSelectedCnt, &cycling_belief_events, BELIEF_EVENT_SELECTIONS);
    //2. Process incoming belief events from FIFO, building implications utilizing input sequences and in 1. retrieved events.
    Cycle_ProcessInputBeliefEvents(currentTime);
    //3. Process incoming goal events from FIFO, propagating subgoals according to implications, triggering decisions when above decision threshold
    Cycle_ProcessInputGoalEvents(currentTime);
    //4. Perform inference between in 1. retrieved events and semantically/temporally related, high-priority concepts to derive and process new events
    Cycle_Inference(currentTime);
    //5. Apply relative forgetting for concepts according to CONCEPT_DURABILITY and events according to BELIEF_EVENT_DURABILITY
    Cycle_RelativeForgetting(currentTime);
    //6. Push in 1. selected events back to the queue as well, applying relative forgetting based on BELIEF_EVENT_DURABILITY_ON_USAGE
    Cycle_PushEvents(currentTime);
}
|
DRB021-reductionmissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A kernel with a two-level parallelizable loop nest accumulating into sum:
if the reduction(+:sum) clause were missing, the updates to sum would
constitute a data race. In this variant the reduction(+:sum) clause is
present on both loop levels, so the accumulation is race-free; the pairs
below document where the race would occur without the clause:
sum@70:7 vs. sum@70:7
sum@70:7 vs. sum@70:13
*/
#include <stdio.h>
int main(int argc, char* argv[])
{
int row, col;
float val, total = 0.0;
int n = 100;
float u[100][100];
/* Fill the matrix with 0.5 using a two-level parallel loop nest. */
#pragma omp parallel for
for (row = 0; row < n; row++)
#pragma omp parallel for
for (col = 0; col < n; col++)
u[row][col] = 0.5;
/* Accumulate the sum of squares; val is privatized per thread and the
partial sums are combined via the reduction clause on both levels. */
#pragma omp parallel for private(val) reduction(+:total)
for (row = 0; row < n; row++)
#pragma omp parallel for private(val) reduction(+:total)
for (col = 0; col < n; col++)
{
val = u[row][col];
total = total + val * val;
}
printf ("sum = %f\n", total);
return 0;
}
|
148.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose
// 3-D heat equation Jacobi stencil (PolyBench heat-3d), tiled by the CHILL
// code generator. Each time step performs an A->B sweep followed by a B->A
// sweep over the interior points of an n x n x n grid (n <= 200). Loops
// t4/t8 step over 32-wide tiles of the first two dimensions, t6/t10 walk
// points inside a tile, and t12 is the innermost (contiguous) dimension.
void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) {
int t12;
int t10;
int t8;
int t6;
int t4;
int t2;
// NOTE(review): the time loop bound is hard-coded to 1000 by the code
// generator; the tsteps parameter is currently unused.
for (t2 = 1; t2 <= 1000; t2 += 1) {
// Fixed: the generated private clauses listed t14, which is not declared
// anywhere in this function; OpenMP requires variables in a private clause
// to be declared, so the pragma failed to compile with OpenMP enabled.
#pragma omp parallel for private(t4,t6,t8,t10,t12)
for (t4 = 1; t4 <= n - 2; t4 += 32)
for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
for (t8 = 1; t8 <= n - 2; t8 += 32)
for (t10 = t8; t10 <= (t8 + 31 < n - 2 ? t8 + 31 : n - 2); t10 += 1)
for (t12 = 1; t12 <= n - 2; t12 += 1)
B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12];
#pragma omp parallel for private(t4,t6,t8,t10,t12)
for (t4 = 1; t4 <= n - 2; t4 += 32)
for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1)
for (t8 = 1; t8 <= n - 2; t8 += 32)
for (t10 = t8; t10 <= (t8 + 31 < n - 2 ? t8 + 31 : n - 2); t10 += 1)
for (t12 = 1; t12 <= n - 2; t12 += 1)
A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12];
}
}
|
par_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.33 $
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
/**
 * Builds the classical interpolation (prolongation) operator P for BoomerAMG
 * from the matrix A, the strength-of-connection matrix S and the C/F
 * splitting in CF_marker (>= 0: C-point, < 0: F-point, -3: special F-point
 * that is skipped for interpolation).
 *
 * The routine proceeds in two passes over the fine grid rows, each
 * parallelized by partitioning the rows into contiguous chunks per thread:
 *   pass 1 counts the nonzeros of P (diag and offd parts separately) and
 *          builds the fine-to-coarse index mapping;
 *   pass 2 fills in the interpolation weights, distributing connections to
 *          strong F-neighbors onto the strong C-neighbors and folding weak
 *          connections into the diagonal.
 * Off-processor data (CF_marker, dof_func, fine_to_coarse, and the ghost
 * rows A_ext of A) is exchanged up front via the ParCSR communication
 * package. Optionally, P is truncated afterwards by trunc_factor/max_elmts.
 *
 * Parameters: A           fine-level ParCSR matrix
 *             CF_marker   C/F splitting per local fine row (may be modified:
 *                         -3 entries are reset to -1 before returning)
 *             S           strength matrix (same row partitioning as A)
 *             num_cpts_global  global coarse-point partitioning info
 *             num_functions/dof_func  system-AMG function identifiers;
 *                         interpolation only couples equal dof_func values
 *             debug_flag  timing/diagnostic output control (4 = timings;
 *                         negative enables warning printing)
 *             trunc_factor/max_elmts  truncation controls for P
 *             col_offd_S_to_A  optional map from S offd columns to A offd
 *                         columns (NULL if identical)
 *             P_ptr       output: the interpolation matrix
 * Returns 0.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
double trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
double *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
double *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k,kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
double diagonal;
double sum;
double distribute;
double zero = 0.0;
double one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
double wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
/* A negative debug_flag additionally enables the zero-diagonal warning. */
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
/* Compress A_ext in place, re-encoding its global column indices:
* columns owned locally (in [col_1, col_n)) become local diag indices
* k - col_1; columns found in col_map_offd become the negative code
* -kc-1 so the two cases can be told apart later; all other columns
* are dropped. A_ext_i is then shifted to restore valid row pointers. */
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* Thread j counts the P nonzeros for its contiguous row range [ns, ne);
* the first `rest` threads take one extra row each so all rows are
* covered. Per-thread tallies are combined by prefix sums below. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
/* Prefix-sum the per-thread counters to get global offsets and totals. */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
/* Shift fine_to_coarse to global coarse indices (my_first_cpt plus the
* coarse count of all preceding threads) before exchanging it; the
* shift is undone locally right after the exchange. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
/* Second pass: each thread fills the weights of its row range, starting
* its P_diag/P_offd counters from the prefix sums computed in pass 1. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
/* A_ext columns >= 0 are local diag columns; negative codes
* -kc-1 refer to offd column kc (see the A_ext compression
* loop above). */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
/* Use a fresh strong-F marker value for the next row so stale marks
* from this row cannot be mistaken for strong F-points (avoids
* re-clearing P_marker every row). */
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* Build col_map_offd_P with only the offd columns actually used by P,
* and renumber P_offd_j to index into that compressed map. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
/* Reset the special -3 F-points to ordinary F-points before returning. */
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
double trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/*
 * Build the classical (Ruge-Stueben style) interpolation operator P,
 * variant for hyperbolic PDEs: weak fine connections are treated like
 * strong fine connections, i.e. every F-neighbor in row i of A gets its
 * coefficient distributed to the strongly-influencing C-points.
 *
 * Parameters:
 *   A               - fine-grid operator (ParCSR)
 *   CF_marker       - C/F splitting per local fine point; >= 0 means
 *                     C-point, < 0 means F-point (-3 is reset to -1 below)
 *   S               - strength-of-connection matrix (same row space as A)
 *   num_cpts_global - coarse-point partition; with HYPRE_NO_GLOBAL_PARTITION
 *                     holds {first local cpt, one past last}, otherwise one
 *                     entry per process plus total
 *   num_functions   - number of unknowns per node (systems mode if > 1)
 *   dof_func        - function/unknown label per fine point (systems mode)
 *   debug_flag      - == 4 prints per-phase wall-clock timings
 *   trunc_factor    - drop P entries smaller than trunc_factor * row max
 *   max_elmts       - cap on nonzeros kept per row of P (0 = no cap)
 *   col_offd_S_to_A - map from S's off-diag column numbering to A's
 *                     (may be NULL when the numberings coincide)
 *   P_ptr           - output: the assembled interpolation matrix
 *
 * Returns 0.
 */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
double *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
double *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k, kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
double diagonal;
double sum;
double distribute;
double zero = 0.0;
double one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
double wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* Assumed partition: only the last rank knows the global cpt count,
 * so broadcast it to everyone. */
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
 * Get the CF_marker data for the off-processor columns
 *-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
/* In systems mode, also exchange the unknown labels for ghost columns. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
 * Get the ghost rows of A
 *---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
/* Compress A_ext in place, translating global column numbers:
 *   - columns owned locally become the local index (k - col_1);
 *   - ghost columns found in col_map_offd are encoded as -kc-1 so the
 *     second pass can tell diag from offd entries by sign;
 *   - columns not present in either are dropped.
 * A_ext_i[i] temporarily stores the end of the compressed row i; the
 * shift loop below turns it back into a proper row-pointer array.
 * (Serial case: num_cols_A_offd == 0, so none of this executes.) */
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* Manual row partitioning over threads: each thread j owns rows
 * [ns,ne); the first `rest` threads get one extra row so all n_fine
 * rows are covered.  Per-thread counts are prefix-summed afterwards. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a C-point, interpolation is the identity. Also set up
 * mapping vector.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i.
 *--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
/* Prefix-sum the per-thread counters so each thread knows its global
 * starting offset in P's arrays during the second pass. */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
/*-----------------------------------------------------------------------
 * Initialize some stuff.
 *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Send and receive fine_to_coarse info.
 *-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
/* Temporarily shift fine_to_coarse to global coarse numbering (adding
 * my_first_cpt plus the per-thread offset) so the communicated values
 * are global; the shift is undone locally right after the exchange. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 *-----------------------------------------------------------------------*/
/* Second pass: fill P row by row.  Each thread resumes at the offsets
 * computed by the prefix sums above; P_marker / P_marker_offd record,
 * per row, where each C-point's weight lives in P's data arrays. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
/* First entry of each diag row of A is assumed to be the diagonal. */
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
 * Note: currently no distribution to the diagonal in this case.
 *--------------------------------------------------------------*/
else
{
sum = zero;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *-----------------------------------------------------------*/
/* sgn carries the sign of i1's diagonal; only connections with
 * the opposite sign (sgn*a < 0) take part in the distribution. */
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and do the distribution.
 *-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
/* No strong C-connections to distribute to: lump a_{i,i1}
 * into the diagonal instead (same-unknown entries only in
 * systems mode). */
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
 * Still looping over ith row of A. Next, loop over the
 * off-diagonal part of A
 *---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
 * Note: currently no distribution to the diagonal in this case.
 *-----------------------------------------------------------*/
else
{
sum = zero;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
/* A_ext_j entries >= 0 index the local diag block; entries
 * < 0 encode offd columns as -kc-1 (see compression above). */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and do
 * the distribution.
 *--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
/* This also supplies P_offd_i[ns] for the NEXT thread's first row,
 * since its C-point branch does not write P_offd_i[i]. */
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
/* Hand the freshly built CSR arrays to P (P takes ownership). */
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* Compress P_offd's column space: keep only columns actually used,
 * build col_map_offd_P, and renumber P_offd_j accordingly. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
/* Restore CF_marker: -3 (special F flag used upstream) back to -1. */
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildDirInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
double trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
double *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
double diagonal;
double sum_N_pos, sum_P_pos;
double sum_N_neg, sum_P_neg;
double alfa = 1.0;
double beta = 1.0;
double zero = 0.0;
double one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
double wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
sum_N_pos = 0;
sum_N_neg = 0;
sum_P_pos = 0;
sum_P_neg = 0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (num_functions == 1 || dof_func[i1] == dof_func[i])
{
if (A_diag_data[jj] > 0)
sum_N_pos += A_diag_data[jj];
else
sum_N_neg += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
if (A_diag_data[jj] > 0)
sum_P_pos += A_diag_data[jj];
else
sum_P_neg += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
{
if (A_offd_data[jj] > 0)
sum_N_pos += A_offd_data[jj];
else
sum_N_neg += A_offd_data[jj];
}
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
if (A_offd_data[jj] > 0)
sum_P_pos += A_offd_data[jj];
else
sum_P_neg += A_offd_data[jj];
}
}
}
if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
if (P_diag_data[jj]> 0)
P_diag_data[jj] *= -beta;
else
P_diag_data[jj] *= -alfa;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
if (P_offd_data[jj]> 0)
P_offd_data[jj] *= -beta;
else
P_offd_data[jj] *= -alfa;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
return(0);
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGInterpTruncation
 *
 * Truncates the interpolation operator P in place:
 *   - if trunc_factor > 0, drops every entry whose magnitude is smaller
 *     than trunc_factor times the largest magnitude in its row (diag and
 *     offd parts of the row are considered together);
 *   - if max_elmts > 0, caps each row at its max_elmts largest-magnitude
 *     entries.
 * After dropping, each surviving row is rescaled so its row sum is
 * unchanged.  The CSR j/data arrays of both the diag and offd parts are
 * compressed, and reallocated process-wide when anything was dropped.
 *
 * Threading: each thread truncates and locally compresses its own
 * contiguous row range; per-thread drop counts are then combined and the
 * thread-local chunks are stitched into new process-wide CSR arrays.
 *
 * @param P             ParCSR interpolation matrix, modified in place.
 * @param trunc_factor  relative magnitude threshold (0 disables).
 * @param max_elmts     max nonzeros kept per row (0 disables).
 * @return ierr (always 0).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 double trunc_factor,
                                 HYPRE_Int max_elmts)
{
   hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
   HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
   HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
   double *P_diag_data = hypre_CSRMatrixData(P_diag);
   HYPRE_Int *P_diag_j_new;
   double *P_diag_data_new;

   hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
   HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
   HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
   double *P_offd_data = hypre_CSRMatrixData(P_offd);
   HYPRE_Int *P_offd_j_new;
   double *P_offd_data_new;

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_diag);
   /* num_cols is used to encode offd columns as (col + num_cols) when the
    * diag and offd entries of a row are merged for sorting below */
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(P_diag);
   HYPRE_Int i, j, start_j;
   HYPRE_Int ierr = 0;
   HYPRE_Int next_open;
   HYPRE_Int now_checking;
   HYPRE_Int num_lost;
   HYPRE_Int num_lost_global;
   HYPRE_Int next_open_offd;
   HYPRE_Int now_checking_offd;
   HYPRE_Int num_lost_offd;
   HYPRE_Int num_lost_global_offd;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int num_elmts;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   double max_coef;
   double row_sum;
   double scale;

   /* Threading variables. Entry i of num_lost_(offd_)per_thread holds the
    * number of dropped entries over thread i's row range. Cum_lost_per_thread
    * will temporarily store the cumulative number of dropped entries up to
    * each thread. */
   HYPRE_Int my_thread_num, num_threads, start, stop;
   HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
   HYPRE_Int * cum_lost_per_thread;
   HYPRE_Int * num_lost_per_thread;
   HYPRE_Int * num_lost_offd_per_thread;

   /* Initialize threading variables */
   max_num_threads[0] = hypre_NumThreads();
   cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
   num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
   num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
   for(i=0; i < max_num_threads[0]; i++)
   {
      num_lost_per_thread[i] = 0;
      num_lost_offd_per_thread[i] = 0;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,max_coef,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
   {
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();

      /* Compute each thread's range of rows to truncate and compress. Note,
       * that i, j and data are all compressed as entries are dropped, but
       * that the compression only occurs locally over each thread's row
       * range. P_diag_i is only made globally consistent at the end of this
       * routine. During the dropping phases, P_diag_i[stop] will point to
       * the start of the next thread's row range. */

      /* my row range */
      start = (n_fine/num_threads)*my_thread_num;
      if (my_thread_num == num_threads-1)
      {  stop = n_fine; }
      else
      {  stop = (n_fine/num_threads)*(my_thread_num+1); }

      /*
       * Truncate based on truncation tolerance
       */
      if (trunc_factor > 0)
      {
         num_lost = 0;
         num_lost_offd = 0;

         /* Two-pointer in-place compaction: now_checking scans the old
          * entries, next_open is where the next kept entry is written. */
         next_open = P_diag_i[start];
         now_checking = P_diag_i[start];
         /* NOTE(review): stray double semicolons below are harmless empty
          * statements left as-is. */
         next_open_offd = P_offd_i[start];;
         now_checking_offd = P_offd_i[start];;

         for (i = start; i < stop; i++)
         {
            /* Largest magnitude in row i, over both diag and offd parts */
            max_coef = 0;
            for (j = P_diag_i[i]; j < P_diag_i[i+1]; j++)
               max_coef = (max_coef < fabs(P_diag_data[j])) ?
                  fabs(P_diag_data[j]) : max_coef;
            for (j = P_offd_i[i]; j < P_offd_i[i+1]; j++)
               max_coef = (max_coef < fabs(P_offd_data[j])) ?
                  fabs(P_offd_data[j]) : max_coef;
            max_coef *= trunc_factor;

            /* Shift this row's start back by the entries dropped so far;
             * the old (unshifted) row extent is still read via P_diag_i[i+1]. */
            start_j = P_diag_i[i];
            P_diag_i[i] -= num_lost;
            row_sum = 0;
            scale = 0;
            for (j = start_j; j < P_diag_i[i+1]; j++)
            {
               row_sum += P_diag_data[now_checking];
               if (fabs(P_diag_data[now_checking]) < max_coef)
               {
                  /* drop this entry */
                  num_lost++;
                  now_checking++;
               }
               else
               {
                  /* keep this entry; compact it toward next_open */
                  scale += P_diag_data[now_checking];
                  P_diag_data[next_open] = P_diag_data[now_checking];
                  P_diag_j[next_open] = P_diag_j[now_checking];
                  now_checking++;
                  next_open++;
               }
            }

            /* Same compaction for the off-diagonal part of row i */
            start_j = P_offd_i[i];
            P_offd_i[i] -= num_lost_offd;
            for (j = start_j; j < P_offd_i[i+1]; j++)
            {
               row_sum += P_offd_data[now_checking_offd];
               if (fabs(P_offd_data[now_checking_offd]) < max_coef)
               {
                  num_lost_offd++;
                  now_checking_offd++;
               }
               else
               {
                  scale += P_offd_data[now_checking_offd];
                  P_offd_data[next_open_offd] = P_offd_data[now_checking_offd];
                  P_offd_j[next_open_offd] = P_offd_j[now_checking_offd];
                  now_checking_offd++;
                  next_open_offd++;
               }
            }
            /* normalize row of P: rescale kept entries so the row sum
             * equals the pre-truncation row sum */
            if (scale != 0.)
            {
               if (scale != row_sum)
               {
                  scale = row_sum/scale;
                  for (j = P_diag_i[i]; j < (P_diag_i[i+1]-num_lost); j++)
                     P_diag_data[j] *= scale;
                  for (j = P_offd_i[i]; j < (P_offd_i[i+1]-num_lost_offd); j++)
                     P_offd_data[j] *= scale;
               }
            }
         } /* end loop for (i = 0; i < n_fine; i++) */

         /* store number of dropped elements and number of threads */
         /* NOTE(review): max_num_threads[0] is refreshed only inside this
          * trunc_factor branch; when trunc_factor == 0 it keeps the value
          * from hypre_NumThreads() — presumably equal to the active thread
          * count here, TODO confirm. */
         if(my_thread_num == 0)
         {  max_num_threads[0] = num_threads; }
         num_lost_per_thread[my_thread_num] = num_lost;
         num_lost_offd_per_thread[my_thread_num] = num_lost_offd;
      } /* end if (trunc_factor > 0) */

      /*
       * Truncate based on capping the nnz per row
       *
       */
      if (max_elmts > 0)
      {
         HYPRE_Int P_mxnum, cnt1, last_index, last_index_offd;
         HYPRE_Int *P_aux_j;
         double *P_aux_data;

         /* find maximum row length locally over this row range */
         P_mxnum = 0;
         for (i=start; i<stop; i++)
         {
            /* Note P_diag_i[stop] is the starting point for the next thread
             * in j and data, not the stop point for this thread */
            last_index = P_diag_i[i+1];
            last_index_offd = P_offd_i[i+1];
            if(i == stop-1)
            {
               /* last row of this range: correct for entries this thread
                * already dropped in the tolerance phase */
               last_index -= num_lost_per_thread[my_thread_num];
               last_index_offd -= num_lost_offd_per_thread[my_thread_num];
            }
            cnt1 = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
            if (cnt1 > P_mxnum) P_mxnum = cnt1;
         }

         /* Some rows exceed max_elmts, and require truncation. Essentially,
          * each thread truncates and compresses its range of rows locally. */
         if (P_mxnum > max_elmts)
         {
            num_lost = 0;
            num_lost_offd = 0;

            /* two temporary arrays to hold row i for temporary operations */
            P_aux_j = hypre_CTAlloc(HYPRE_Int, P_mxnum);
            P_aux_data = hypre_CTAlloc(double, P_mxnum);

            /* next write positions in the (locally compacted) j/data arrays */
            cnt_diag = P_diag_i[start];
            cnt_offd = P_offd_i[start];

            for (i = start; i < stop; i++)
            {
               /* Note P_diag_i[stop] is the starting point for the next thread
                * in j and data, not the stop point for this thread */
               last_index = P_diag_i[i+1];
               last_index_offd = P_offd_i[i+1];
               if(i == stop-1)
               {
                  last_index -= num_lost_per_thread[my_thread_num];
                  last_index_offd -= num_lost_offd_per_thread[my_thread_num];
               }

               row_sum = 0;
               num_elmts = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
               if (max_elmts < num_elmts)
               {
                  /* copy both diagonal and off-diag parts of row i to _aux_ arrays */
                  cnt = 0;
                  for (j = P_diag_i[i]; j < last_index; j++)
                  {
                     P_aux_j[cnt] = P_diag_j[j];
                     P_aux_data[cnt++] = P_diag_data[j];
                     row_sum += P_diag_data[j];
                  }
                  /* provisionally count the whole row as lost; the kept
                   * entries are credited back after the copy-back below */
                  num_lost += cnt;
                  cnt1 = cnt;
                  for (j = P_offd_i[i]; j < last_index_offd; j++)
                  {
                     /* offd columns are shifted by num_cols so diag and offd
                      * entries can be sorted together and told apart again */
                     P_aux_j[cnt] = P_offd_j[j]+num_cols;
                     P_aux_data[cnt++] = P_offd_data[j];
                     row_sum += P_offd_data[j];
                  }
                  num_lost_offd += cnt-cnt1;

                  /* sort data (descending by magnitude) */
                  hypre_qsort2abs(P_aux_j,P_aux_data,0,cnt-1);

                  /* copy back the max_elmts largest entries */
                  scale = 0;
                  P_diag_i[i] = cnt_diag;
                  P_offd_i[i] = cnt_offd;
                  for (j = 0; j < max_elmts; j++)
                  {
                     scale += P_aux_data[j];
                     if (P_aux_j[j] < num_cols)
                     {
                        P_diag_j[cnt_diag] = P_aux_j[j];
                        P_diag_data[cnt_diag++] = P_aux_data[j];
                     }
                     else
                     {
                        P_offd_j[cnt_offd] = P_aux_j[j]-num_cols;
                        P_offd_data[cnt_offd++] = P_aux_data[j];
                     }
                  }
                  /* credit back the entries that were actually kept */
                  num_lost -= cnt_diag-P_diag_i[i];
                  num_lost_offd -= cnt_offd-P_offd_i[i];

                  /* normalize row of P */
                  if (scale != 0.)
                  {
                     if (scale != row_sum)
                     {
                        scale = row_sum/scale;
                        for (j = P_diag_i[i]; j < cnt_diag; j++)
                           P_diag_data[j] *= scale;
                        for (j = P_offd_i[i]; j < cnt_offd; j++)
                           P_offd_data[j] *= scale;
                     }
                  }
               }  /* end if (max_elmts < num_elmts) */
               else
               {
                  /* nothing dropped from this row, but still have to shift entries back
                   * by the number dropped so far */
                  if (P_diag_i[i] != cnt_diag)
                  {
                     start_j = P_diag_i[i];
                     P_diag_i[i] = cnt_diag;
                     for (j = start_j; j < last_index; j++)
                     {
                        P_diag_j[cnt_diag] = P_diag_j[j];
                        P_diag_data[cnt_diag++] = P_diag_data[j];
                     }
                  }
                  else
                     cnt_diag += last_index-P_diag_i[i];

                  if (P_offd_i[i] != cnt_offd)
                  {
                     start_j = P_offd_i[i];
                     P_offd_i[i] = cnt_offd;
                     for (j = start_j; j < last_index_offd; j++)
                     {
                        P_offd_j[cnt_offd] = P_offd_j[j];
                        P_offd_data[cnt_offd++] = P_offd_data[j];
                     }
                  }
                  else
                     cnt_offd += last_index_offd-P_offd_i[i];
               }
            } /* end for (i = 0; i < n_fine; i++) */

            num_lost_per_thread[my_thread_num] += num_lost;
            num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
            hypre_TFree(P_aux_j);
            hypre_TFree(P_aux_data);

         } /* end if (P_mxnum > max_elmts) */
      } /* end if (max_elmts > 0) */

      /* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if(my_thread_num == 0)
      {
         num_lost_global = 0;
         num_lost_global_offd = 0;
         for(i = 0; i < max_num_threads[0]; i++)
         {
            num_lost_global += num_lost_per_thread[i];
            num_lost_global_offd += num_lost_offd_per_thread[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*
       * Synchronize and create new diag data structures
       */
      if (num_lost_global)
      {
         /* Each thread has it's own locally compressed CSR matrix from rows start
          * to stop.  Now, we have to copy each thread's chunk into the new
          * process-wide CSR data structures
          *
          * First, we compute the new process-wide number of nonzeros (i.e.,
          * P_diag_size), and compute cum_lost_per_thread[k] so that this
          * entry holds the cumulative sum of entries dropped up to and
          * including thread k. */
         if(my_thread_num == 0)
         {
            P_diag_size = P_diag_i[n_fine];
            for(i = 0; i < max_num_threads[0]; i++)
            {
               P_diag_size -= num_lost_per_thread[i];
               if(i > 0)
               {  cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; }
               else
               {  cum_lost_per_thread[i] = num_lost_per_thread[i]; }
            }

            P_diag_j_new = hypre_CTAlloc(HYPRE_Int,P_diag_size);
            P_diag_data_new = hypre_CTAlloc(double,P_diag_size);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* points to next open spot in new data structures for this thread */
         if(my_thread_num == 0)
         {  next_open = 0; }
         else
         {
            /* remember, cum_lost_per_thread[k] stores the num dropped up to and
             * including thread k */
            next_open = P_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
         }

         /* copy the j and data arrays over */
         for(i = P_diag_i[start]; i < P_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
         {
            P_diag_j_new[next_open] = P_diag_j[i];
            P_diag_data_new[next_open] = P_diag_data[i];
            next_open += 1;
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /* update P_diag_i with number of dropped entries by all lower ranked
          * threads */
         if(my_thread_num > 0)
         {
            for(i=start; i<stop; i++)
            {
               P_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
            }
         }

         if(my_thread_num == 0)
         {
            /* Set last entry */
            P_diag_i[n_fine] = P_diag_size ;

            hypre_TFree(P_diag_j);
            hypre_TFree(P_diag_data);
            hypre_CSRMatrixJ(P_diag) = P_diag_j_new;
            hypre_CSRMatrixData(P_diag) = P_diag_data_new;
            hypre_CSRMatrixNumNonzeros(P_diag) = P_diag_size;
         }
      }

      /*
       * Synchronize and create new offd data structures
       */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (num_lost_global_offd)
      {
         /* Repeat process for off-diagonal */
         if(my_thread_num == 0)
         {
            P_offd_size = P_offd_i[n_fine];
            for(i = 0; i < max_num_threads[0]; i++)
            {
               P_offd_size -= num_lost_offd_per_thread[i];
               if(i > 0)
               {  cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; }
               else
               {  cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; }
            }

            P_offd_j_new = hypre_CTAlloc(HYPRE_Int,P_offd_size);
            P_offd_data_new = hypre_CTAlloc(double,P_offd_size);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* points to next open spot in new data structures for this thread */
         if(my_thread_num == 0)
         {  next_open = 0; }
         else
         {
            /* remember, cum_lost_per_thread[k] stores the num dropped up to and
             * including thread k */
            next_open = P_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
         }

         /* copy the j and data arrays over */
         for(i = P_offd_i[start]; i < P_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
         {
            P_offd_j_new[next_open] = P_offd_j[i];
            P_offd_data_new[next_open] = P_offd_data[i];
            next_open += 1;
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /* update P_offd_i with number of dropped entries by all lower ranked
          * threads */
         if(my_thread_num > 0)
         {
            for(i=start; i<stop; i++)
            {
               P_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
            }
         }

         if(my_thread_num == 0)
         {
            /* Set last entry */
            P_offd_i[n_fine] = P_offd_size ;

            hypre_TFree(P_offd_j);
            hypre_TFree(P_offd_data);
            hypre_CSRMatrixJ(P_offd) = P_offd_j_new;
            hypre_CSRMatrixData(P_offd) = P_offd_data_new;
            hypre_CSRMatrixNumNonzeros(P_offd) = P_offd_size;
         }
      }

   } /* end parallel region */

   hypre_TFree(max_num_threads);
   hypre_TFree(cum_lost_per_thread);
   hypre_TFree(num_lost_per_thread);
   hypre_TFree(num_lost_offd_per_thread);

   return ierr;
}
/* Quicksort v and w in place into order of DECREASING fabs(w); the
 * ordering is decided only by w, and v is permuted identically. */
void hypre_qsort2abs( HYPRE_Int *v,
                      double *w,
                      HYPRE_Int left,
                      HYPRE_Int right )
{
   HYPRE_Int idx;
   HYPRE_Int pivot_pos;

   /* ranges of length 0 or 1 are already sorted */
   if (right <= left)
      return;

   /* move the middle element to the front to act as the pivot */
   swap2( v, w, left, (left+right)/2);

   /* partition: entries with |w| strictly larger than the pivot's
    * magnitude are collected directly after position `left` */
   pivot_pos = left;
   for (idx = left+1; idx <= right; idx++)
   {
      if (fabs(w[idx]) > fabs(w[left]))
      {
         pivot_pos++;
         swap2(v, w, pivot_pos, idx);
      }
   }

   /* place the pivot between the partitions, then sort each side */
   swap2(v, w, left, pivot_pos);
   hypre_qsort2abs(v, w, left, pivot_pos-1);
   hypre_qsort2abs(v, w, pivot_pos+1, right);
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - a modified interpolation routine for
 * the unknown approach. Note that the strength matrix S passed in must be
 * built on the entire matrix A.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
double trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
double *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
double *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
double *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k,kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
double diagonal;
double sum;
double distribute;
double zero = 0.0;
double one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
double wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(double, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(double, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
HERE, we only want to distribut to points of the SAME function type
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0 )
{
sum += A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
}
else /* sum = 0 - only add to diag if the same function type */
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal. (only if the same function type)
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
AGAIN, we only want to distribut to points of the SAME function type
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
}
else /* sum = 0 */
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
|
GB_unop__tanh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__tanh_fp64_fp64
// op(A') function: GB_unop_tran__tanh_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = tanh (aij)
// entry type of the input matrix A
#define GB_ATYPE \
double
// entry type of the output matrix C
#define GB_CTYPE \
double
// aij = Ax [pA]: load one entry of A into a local
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// address one entry of C by position p
#define GB_CX(p) Cx [p]
// unary operator: z = tanh (x)
#define GB_OP(z, x) \
z = tanh (x) ;
// casting (identity here: A and C share the double type)
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij): load, cast, apply the operator, and store one entry
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = tanh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TANH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = tanh (Ax [p]) for all p in 0..anz-1, in parallel.
// Cx and Ax may alias (the operation is elementwise, one entry at a time).
// Returns GrB_NO_VALUE when this operator is compiled out via GB_DISABLE,
// GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__tanh_fp64_fp64
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cast is the identity (double -> double); apply the operator
        const double x = Ax [p] ;
        Cx [p] = tanh (x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = tanh (cast (A')): transpose A, typecast, and apply the unary operator.
// The actual loop nest is produced by textually including the shared template
// GB_unop_transpose.c, which expands the GB_* macros defined above; this
// function is only the typed wrapper around that template.
// Returns GrB_NO_VALUE when the operator is compiled out, GrB_SUCCESS otherwise.
GrB_Info GB_unop_tran__tanh_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of the two-phase transpose: counts were computed in phase 1;
// here the template fills C (see GB_unop_transpose.c for the contract)
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
VolumetricDilatedMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricDilatedMaxPooling.c"
#else
#include <THNN/generic/pooling_shape.h>
#include <algorithm>
// Argument and shape validation for volumetric (3D) dilated max pooling.
// Checks that kernel, stride, and dilation sizes are strictly positive, that
// padding is at most half the kernel size, that `input` is a non-empty 4D
// (C,T,H,W) or 5D batch (N,C,T,H,W) tensor, and that the computed output
// extent is >= 1 in every dimension. When `gradOutput` / `indices` are
// non-NULL, their shapes are checked against the expected output shape.
// Failures are reported via THArgCheck/THError; there is no return value.
static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THIndexTensor *indices,
int kT, int kW, int kH,
int dT, int dW, int dH,
int pT, int pW, int pH,
int dilationT, int dilationW, int dilationH,
bool ceilMode) {
int ndim = input->dim();
// dimension indices for (channels, time, height, width); shifted by one
// below in batch (5D) mode to skip the leading batch dimension
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
kT, kH, kW);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 14,
"dilation should be greater than 0, but got dilationT: %d dilationH: %d dilationW: %d",
dilationT, dilationH, dilationW);
THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
if (input->dim() == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
THArgCheck(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH, 2,
"pad should be smaller than half of kernel size, but got "
"kT: %d kW: %d, kH: %d, padT: %d, padW: %d, padH: %d",
kT, kW, kH, pT, pW, pH);
nslices = input->size(dimN);
itime = input->size(dimt);
iheight = input->size(dimh);
iwidth = input->size(dimw);
// expected output extents for the given kernel/stride/pad/dilation/ceil mode
otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceilMode);
oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceilMode);
owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceilMode);
if (otime < 1 || owidth < 1 || oheight < 1)
THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
}
if (indices != NULL) {
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimN, nslices);
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimt, otime);
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimh, oheight);
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimw, owidth);
}
}
// Forward kernel for one sample: for every output location, scan the dilated
// kT x kH x kW window of the input, write the max value to output_p and the
// flat within-slice offset of the argmax to indz_p (consumed by the backward
// pass). NaN input values win the comparison so NaNs propagate. Slices
// (channels) are processed in parallel with OpenMP.
static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
          scalar_t *input_p,
          scalar_t *output_p,
          THIndex_t *indz_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          int dilationT,
          int dilationW,
          int dilationH)
{
  int64_t slice;
#pragma omp parallel for private(slice)
  for (slice = 0; slice < nslices; slice++)
  {
    scalar_t *in_plane = input_p + slice * itime * iwidth * iheight;
    int64_t out_base = slice * otime * owidth * oheight;
    int64_t to, ho, wo;
    for (to = 0; to < otime; to++)
    {
      for (ho = 0; ho < oheight; ho++)
      {
        for (wo = 0; wo < owidth; wo++)
        {
          // window start (may be negative due to padding) and clamped end
          int64_t t0 = to * dT - pT;
          int64_t h0 = ho * dH - pH;
          int64_t w0 = wo * dW - pW;
          int64_t t1 = std::min(t0 + (kT - 1) * dilationT + 1, itime);
          int64_t h1 = std::min(h0 + (kH - 1) * dilationH + 1, iheight);
          int64_t w1 = std::min(w0 + (kW - 1) * dilationW + 1, iwidth);
          // advance the start to the first in-bounds sample on the dilation grid
          while (t0 < 0) t0 += dilationT;
          while (h0 < 0) h0 += dilationH;
          while (w0 < 0) w0 += dilationW;

          // scan the window for the max (NaN always wins, so it propagates)
          int64_t best_idx = -1;
          scalar_t best_val = -THInf;
          for (int64_t t = t0; t < t1; t += dilationT)
          {
            for (int64_t h = h0; h < h1; h += dilationH)
            {
              for (int64_t w = w0; w < w1; w += dilationW)
              {
                int64_t idx = t * iwidth * iheight + h * iwidth + w;
                scalar_t v = in_plane[idx];
                if ((v > best_val) || std::isnan(v))
                {
                  best_val = v;
                  best_idx = idx;
                }
              }
            }
          }

          // store the argmax location and the max value
          int64_t out_off = out_base + to * owidth * oheight + ho * owidth + wo;
          indz_p[out_off] = best_idx;
          output_p[out_off] = best_val;
        }
      }
    }
  }
}
// Forward pass for volumetric (3D) dilated max pooling.
// Validates shapes, resizes `output` and `indices` to the computed output
// size, then delegates to updateOutput_frame to take the max over each
// dilated kT x kH x kW window. `indices` records the flat within-slice
// argmax offset for each output element (used by updateGradInput).
// Handles both 4D (non-batch) and 5D (batch) inputs.
void THNN_(VolumetricDilatedMaxPooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THIndexTensor *indices,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH,
int pT,
int pW,
int pH,
int dilationT,
int dilationW,
int dilationH,
bool ceilMode)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
scalar_t *input_data;
scalar_t *output_data;
THIndex_t *indices_data;
// dimension indices for (channels, time, height, width); shifted by one
// below in batch (5D) mode to skip the leading batch dimension
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->dim() == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
state, input, NULL, NULL,
kT, kW, kH, dT, dW, dH,
pT, pW, pH, dilationT, dilationW, dilationH,
ceilMode);
/* sizes */
nslices = input->size(dimN);
itime = input->size(dimt);
iheight = input->size(dimh);
iwidth = input->size(dimw);
otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceilMode);
oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceilMode);
owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceilMode);
/* get contiguous input (frame kernel assumes dense row-major layout) */
input = THTensor_(newContiguous)(input);
if (input->dim() == 4) /* non-batch mode */
{
/* resize output */
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
/* indices will contain ti,i,j uchar locations packed into float/double */
THIndexTensor_(resize4d)(indices, nslices, otime, oheight, owidth);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
input_data, output_data,
indices_data,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
dilationT, dilationW, dilationH
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size(0);
// per-sample strides in the contiguous input/output buffers
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
/* resize output */
THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
/* indices will contain ti,i,j locations for each output point */
THIndexTensor_(resize5d)(indices, nBatch, nslices, otime, oheight, owidth);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
// parallelize over batch samples; each frame call also parallelizes over
// slices internally (nested omp regions are inactive by default)
#pragma omp parallel for private(p)
for (p=0; p < nBatch; p++)
{
THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)(
input_data + p * istride,
output_data + p * ostride,
indices_data + p * ostride,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH,
pT, pW, pH,
dilationT, dilationW, dilationH
);
}
}
/* cleanup: release the contiguous copy made above */
c10::raw::intrusive_ptr::decref(input);
}
// Backward kernel for one sample: route each gradOutput value to the input
// element that produced the forward-pass max, whose flat within-slice offset
// was recorded in indz_p (-1 marks an empty window and is skipped). Output
// locations are visited in flat order, which matches the forward pass's
// ti*oheight*owidth + i*owidth + j indexing exactly. Slices run in parallel.
// The stride/pad/dilation parameters are unused here but kept for a uniform
// frame-kernel signature.
static void THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
          scalar_t *gradInput_p,
          scalar_t *gradOutput_p,
          THIndex_t *indz_p,
          int64_t nslices,
          int64_t itime,
          int64_t iwidth,
          int64_t iheight,
          int64_t otime,
          int64_t owidth,
          int64_t oheight,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          int dilationT,
          int dilationW,
          int dilationH)
{
  int64_t slice;
#pragma omp parallel for private(slice)
  for (slice = 0; slice < nslices; slice++)
  {
    scalar_t *gin = gradInput_p + slice * itime * iwidth * iheight;
    scalar_t *gout = gradOutput_p + slice * otime * owidth * oheight;
    THIndex_t *ind = indz_p + slice * otime * owidth * oheight;
    int64_t nout = otime * oheight * owidth;
    for (int64_t q = 0; q < nout; q++)
    {
      /* retrieve position of max; -1 means the window held no valid sample */
      int64_t maxp = ind[q];
      if (maxp != -1) {
        /* update gradient */
        gin[maxp] += gout[q];
      }
    }
  }
}
// Backward pass for volumetric (3D) dilated max pooling.
// Scatters each gradOutput value back to the single input element that won
// the forward-pass max (its flat within-slice offset is stored in `indices`);
// every other gradInput element stays zero. Handles both 4D (non-batch) and
// 5D (batch) inputs.
//
// Fix: the size locals (`nslices`, `itime`, ...) were declared `int`, but they
// hold tensor dimensions and are forwarded to the frame kernel as int64_t —
// `int` could overflow for very large tensors and was inconsistent with
// updateOutput above, which already uses int64_t. They are now int64_t.
void THNN_(VolumetricDilatedMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          int dilationT,
          int dilationW,
          int dilationH,
          bool ceilMode)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;
  THIndex_t *indices_data;
  // dimension indices for (channels, time, height, width); shifted by one
  // below in batch (5D) mode to skip the leading batch dimension
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  THNN_(VolumetricDilatedMaxPooling_shapeCheck)(
        state, input, gradOutput, indices,
        kT, kW, kH, dT, dW, dH,
        pT, pW, pH, dilationT, dilationW, dilationH,
        ceilMode);

  // TODO: gradOutput shape check
  /* get contiguous gradOutput (frame kernel assumes dense layout) */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize and zero: only argmax positions receive gradient below */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 5)
  {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size(dimN);
  itime = input->size(dimt);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);
  otime = gradOutput->size(dimt);
  oheight = gradOutput->size(dimh);
  owidth = gradOutput->size(dimw);

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 4) /* non-batch mode*/
  {
    THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data,
      indices_data,
      nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      dT, dW, dH,
      pT, pW, pH,
      dilationT, dilationW, dilationH
    );
  }
  else /* batch mode */
  {
    int64_t p;
    int64_t nBatch = input->size(0);
    // per-sample strides in the contiguous gradInput/gradOutput buffers
    int64_t istride = nslices * itime * iwidth * iheight;
    int64_t ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++)
    {
      THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)(
        gradInput_data + p * istride,
        gradOutput_data + p * ostride,
        indices_data + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        dT, dW, dH,
        pT, pW, pH,
        dilationT, dilationW, dilationH
      );
    }
  }

  /* cleanup: release the contiguous copy made above */
  c10::raw::intrusive_ptr::decref(gradOutput);
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.