source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__first_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_int32
// A.*B function (eWiseMult): GB_AemultB__first_int32
// A*D function (colscale): GB_AxD__first_int32
// D*A function (rowscale): GB_DxB__first_int32
// C+=B function (dense accum): GB_Cdense_accumB__first_int32
// C+=b function (dense accum): GB_Cdense_accumb__first_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_int32
// C=scalar+B GB_bind1st__first_int32
// C=scalar+B' GB_bind1st_tran__first_int32
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = aij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT32 || GxB_NO_FIRST_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with the FIRST_INT32 operator
// (cij = aij).  Auto-generated kernel: the numeric loop is supplied by the
// included template file.
GrB_Info GB_Cdense_ewise3_noaccum__first_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator disabled via GB_control.h: caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse, with the FIRST accumulator.
// For FIRST the accumulation kernel is compiled out (#if 0 below), so this
// generated function is effectively a stub that still reports success.
GrB_Info GB_Cdense_accumB__first_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar) where C is dense, with the FIRST accumulator.  As with
// the matrix-accumulate case, the kernel is compiled out (#if 0) for FIRST,
// so this is a success-returning stub.
GrB_Info GB_Cdense_accumb__first_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  With the
// FIRST operator each entry of C is simply copied from A; the loop lives
// in the included meta kernel, which writes through Cx.
GrB_Info GB_AxD__first_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values array, used by the included template
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.  With FIRST each
// result entry comes from D; the loop is in the included meta kernel.
GrB_Info GB_DxB__first_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values array, used by the included template
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the FIRST_INT32 operator.  The
// slice arrays below are allocated inside the included template (if
// needed) and released by the GB_FREE_ALL macro defined just above.
GrB_Info GB_AaddB__first_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek_slice workspace, freed by GB_FREE_ALL on exit
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the FIRST_INT32 operator
// (cij = aij wherever A and B both have an entry).  Workspace handling
// mirrors GB_AaddB above.
GrB_Info GB_AemultB__first_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek_slice workspace, freed by GB_FREE_ALL on exit
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = first(x, Bx): apply the operator with the scalar bound as the
// first argument.  For FIRST the result is always the scalar x, so Bx is
// never read; the stray `; ;` in the loop is the empty expansion of the
// generated GB_GETB macro.
GrB_Info GB_bind1st__first_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap (GBB is true for non-bitmap B)
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
// C = first(x, A'): transpose A and apply the operator with the scalar
// bound first; with FIRST every result entry equals x.  GB_ATYPE is
// temporarily redefined because A plays the role of the second operand
// of z = f(x,y) inside the transpose template.
GrB_Info GB_bind1st_tran__first_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any generated code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
outer_profile.h | #include "CSC.h"
#include "CSR.h"
#include "Triple.h"
#include "radix_sort/radix_sort.hpp"
// #include "vergesort/vergesort.h"
// #include "pdqsort/pdqsort.h"
// #include "cpp-TimSort/include/gfx/timsort.hpp"
#include "utility.h"
#include <algorithm>
#include <iostream>
#include <fstream>
#include <omp.h>
#include <unistd.h>
#include <cstring>
#include<set>
using namespace std;
static uint32_t ncols_of_A;
// static int *rows_to_blockers;
// static int *flops_by_row_blockers;
static int nrows_per_blocker;
#define SIZE 16
#define GFX_TIMSORT_USE_STD_MOVE 1
// Reduce `input` modulo `ceil`, skipping the (slow) hardware division when
// the value is already below the bound.  Identical to `input % ceil` for
// non-negative inputs.
template <typename IT>
IT fast_mod(const IT input, const int ceil) {
    if (input < ceil)
        return input;
    return input % ceil;
}
// Count the multiply-add operations needed to form A*B, with A in CSC and
// B in CSR sharing the inner dimension: column i of A pairs with row i of
// B and contributes |A(:,i)| * |B(i,:)| flops.
template <typename IT, typename NT>
uint64_t getFlop(const CSC<IT, NT>& A, const CSR<IT, NT>& B)
{
    uint64_t total = 0;
#pragma omp parallel for reduction(+ : total)
    for (IT col = 0; col < A.cols; ++col)
    {
        const IT a_nnz = A.colptr[col + 1] - A.colptr[col];
        const IT b_nnz = B.rowptr[col + 1] - B.rowptr[col];
        total += (a_nnz * b_nnz);
    }
    return total;
}
// Map a row id to the id of the row-blocker that owns it.
//
// rows_to_blockers[] (built by do_symbolic) maps each row id directly to
// its blocker id, so a single table lookup suffices.  FIX: removed the
// unreachable linear-scan fallback that followed the return statement.
// num_blockers is kept so the call signature is unchanged.
template <typename IT>
inline IT get_blocker_id(IT rowid, IT* rows_to_blockers, IT num_blockers)
{
    (void) num_blockers;  // retained for interface compatibility
    return rows_to_blockers[rowid];
}
template <typename IT, typename NT>
void do_static_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx,
IT nrows_per_blocker, IT num_blockers,
IT* flops_by_row_blockers, IT& total_flops)
{
#pragma omp parallel for reduction(+ : flops_by_row_blockers[:num_blockers])
for (IT i = startIdx; i < endIdx; ++i)
{
IT rownnz = B.rowptr[i + 1] - B.rowptr[i];
for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j)
{
uint16_t row_blocker_id = A.rowids[j] / nrows_per_blocker;
flops_by_row_blockers[row_blocker_id] += rownnz;
}
}
for (IT i = 0; i < num_blockers; ++i)
{
total_flops += flops_by_row_blockers[i];
}
}
// Dynamic symbolic phase: compute flops per output row, then greedily pack
// consecutive rows into blockers so each blocker receives roughly
// total_flops / num_blockers work.  Fills rows_to_blockers (row -> blocker
// id), flops_by_row_blockers (running volume per blocker) and total_flops.
// NOTE(review): flops_by_rows is only ever incremented here -- assumes the
// caller supplies zero-initialized memory; confirm against my_malloc.
template <typename IT, typename NT>
void do_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx,
IT num_blockers, IT* flops_by_rows, IT* rows_to_blockers,
IT* flops_by_row_blockers, IT& total_flops)
{
double avg_volumn = 0.0;
double cur_volumn = 0.0;
IT cur_blocker_id = 0;
// #pragma omp parallel
{
// per-row flop accumulation; the parallel pragma is deliberately
// commented out (a plain parallel-for here would race on flops_by_rows)
// #pragma omp for reduction(+ : flops_by_rows[:A.rows])
for (IT i = startIdx; i < endIdx; ++i)
{
IT rownnz = B.rowptr[i + 1] - B.rowptr[i];
for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j)
{
flops_by_rows[A.rowids[j]] += rownnz;
}
}
#pragma omp parallel for reduction(+ : total_flops)
for (IT i = 0; i<A.rows; ++i)
total_flops += flops_by_rows[i];
}
// integer-valued target volume per blocker (truncating division)
avg_volumn = total_flops / num_blockers;
rows_to_blockers[0] = 0;
// greedy assignment: open a new blocker once the running volume
// overflows the average.
// NOTE(review): cur_blocker_id can reach num_blockers if the tail keeps
// overflowing -- potential out-of-bounds write on flops_by_row_blockers;
// verify the caller's sizing.
for (IT i=0; i<A.rows; ++i)
{
cur_volumn += flops_by_rows[i];
flops_by_row_blockers[cur_blocker_id] = cur_volumn;
rows_to_blockers[i] = cur_blocker_id;
if (cur_volumn > avg_volumn)
{
cur_blocker_id ++;
cur_volumn = 0;
}
}
// for (IT i = 0 ; i< num_blockers; ++i)
// cout << "BlockerId = " << i << " RowId = " << rows_to_blockers[i] << endl;
}
// Upper bound, in bytes, on the scratch memory needed to hold every
// intermediate product of A*B: one 8-byte slot per flop.
template <typename IT, typename NT>
int64_t getReqMemory(const CSC<IT, NT>& A, const CSR<IT, NT>& B)
{
    const uint64_t flops = getFlop(A, B);
    return flops * sizeof(int64_t);
}
// Pack the (row, col) of a triple into a single 64-bit sort key: row in
// the high 32 bits, column (reinterpreted as unsigned) in the low 32.
// Ordering by this key sorts triples by row, then by column.
struct ExtractKey
{
    inline int64_t operator()(std::tuple<int32_t, int32_t, double> tup)
    {
        const int64_t hi = static_cast<int64_t>(std::get<0>(tup)) << 32;
        const int64_t lo = static_cast<int64_t>(static_cast<uint32_t>(std::get<1>(tup)));
        return hi | lo;
    }
};
struct ExtractKey2
{
inline uint32_t operator()(tuple<int32_t, int32_t, double> tup)
{
return ((std::get<0>(tup) % nrows_per_blocker) << 20 | (uint32_t) std::get<1>(tup));
// return (std::get<0>(tup) << 16) | ((uint32_t) std::get<1>(tup));
// return ((std::get<0>(tup) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup)]] << 16) | ((uint32_t) std::get<1>(tup)));
}
};
// struct CompareKey2
// {
// inline bool operator()(tuple<int32_t, int32_t, double> tup1, tuple<int32_t, int32_t, double> tup2)
// {
// return ((std::get<0>(tup1) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup1)]] << 16) | ((uint32_t) std::get<1>(tup1))) < ((std::get<0>(tup2) % flops_by_row_blockers[rows_to_blockers[std::get<0>(tup2)]] << 16) | ((uint32_t) std::get<1>(tup2)));
// }
// };
// Order two triples by the packed (row << 20 | col) key.
// NOTE(review): row << 20 can overflow/collide for large row or column
// ids; this quirk is preserved exactly from the original comparator.
struct CompareTuple
{
    inline bool operator()(std::tuple<int32_t, int32_t, double> t1, std::tuple<int32_t, int32_t, double> t2)
    {
        const uint32_t k1 = (std::get<0>(t1) << 20) | (uint32_t) std::get<1>(t1);
        const uint32_t k2 = (std::get<0>(t2) << 20) | (uint32_t) std::get<1>(t2);
        return k1 < k2;
    }
};
// Return true when two triples refer to the same (row, col) entry; the
// numeric value is intentionally ignored.
// BUG FIX: the previous body compared `op(t1) == op(t2)` with `op`
// undefined in this scope, which fails to compile when the template is
// instantiated.  The commented-out fallback made the intent clear:
// equality on the first two tuple fields.
template <typename IT, typename NT>
inline bool isTupleEqual (std::tuple<IT, IT, NT> t1, std::tuple<IT, IT, NT> t2)
{
    return std::get<0>(t1) == std::get<0>(t2)
        && std::get<1>(t1) == std::get<1>(t2);
}
// Sort a contiguous run of triples by the packed ExtractKey2 key using the
// external radix_sort; `buffer` is scratch space and must be at least as
// large as the [begin, end) range.  The commented lines are alternative
// sorters that were benchmarked during development.
template <typename IT, typename NT>
void doRadixSort(tuple<IT, IT, NT>* begin, tuple<IT, IT, NT>* end, tuple<IT, IT, NT>* buffer)
{
radix_sort(begin, end, buffer, ExtractKey2());
// vergesort::vergesort(begin, end, CompareTuple());
// pdqsort(begin, end, CompareTuple());
// gfx::timsort(begin, end, CompareTuple());
// sort(begin, end, compareTuple<IT, NT>);
}
// In-place compaction of a sorted triple array: consecutive entries with
// the same (row, col) key have their values accumulated into a single
// entry.  Returns the number of surviving entries.
template <typename IT, typename NT>
IT doMerge(tuple<IT, IT, NT>* vec, IT length)
{
    if (length == 0) return 0;
    auto key = ExtractKey();
    IT write = 0;
    for (IT read = 1; read < length; ++read)
    {
        if (key(vec[write]) == key(vec[read]))
        {
            // duplicate (row, col): fold the value into the kept entry
            std::get<2>(vec[write]) += std::get<2>(vec[read]);
        }
        else
        {
            // new key: advance the write cursor and copy the entry down
            ++write;
            std::get<0>(vec[write]) = std::get<0>(vec[read]);
            std::get<1>(vec[write]) = std::get<1>(vec[read]);
            std::get<2>(vec[write]) = std::get<2>(vec[read]);
        }
    }
    return write + 1;
}
// Turn per-blocker element counts into [begin, end) offsets via an
// exclusive prefix sum.  Each end pointer starts equal to its begin
// pointer; callers advance it as entries are appended to the blocker.
template <typename IT>
void initializeBlockerBoundary(IT* nums_per_col_blocker, IT num_blockers, IT* blocker_begin_ptr,
                               IT* blocker_end_ptr)
{
    blocker_begin_ptr[0] = 0;
    blocker_end_ptr[0] = 0;
    for (IT b = 1; b < num_blockers; ++b)
    {
        const IT start = blocker_begin_ptr[b - 1] + nums_per_col_blocker[b - 1];
        blocker_begin_ptr[b] = start;
        blocker_end_ptr[b] = start;
    }
}
// One full outer-product SpGEMM pass: C = A * B with A in CSC and B in CSR.
// Partial products are binned into per-row-blocker buffers (phase "PB1"),
// each bin is radix-sorted, duplicate (row, col) entries are merged, and
// the result is converted into the CSR matrix C.  The t_* reference
// parameters accumulate per-phase wall-clock seconds; ttflops reports the
// flop count of the multiply.
//
// FIXES vs. the previous revision: flops_by_rows, global_blocker_counters
// and the per-thread sorting_buffer allocations were leaked; unused locals
// (t2, t3, cur_global_blockers, avg_flops_in_row_blockers) and large runs
// of commented-out code have been removed.
template <typename IT, typename NT>
void OuterSpGEMM_stage(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, CSR<IT, NT>& C, \
double &t_symbolic, double &t_pb1, double &t_sort, double &t_merge, double &t_convert, double &t_alloc, double &t_free, \
int nblockers, int nblockchars, double &ttflops)
{
    double t1;
    t1 = omp_get_wtime();
    typedef tuple<IT, IT, NT> TripleNode;
    const IT nthreads = omp_get_max_threads();
    const IT num_blockers = nblockers;
    const IT block_width = nblockchars;
    // publish the blocking geometry through the file-level statics that
    // ExtractKey2 reads during the sort phase
    ncols_of_A = A.cols;
    nrows_per_blocker = A.rows <= num_blockers * 64 ? 64 : (A.rows + num_blockers - 1) / num_blockers;
    IT total_flop = 0;
    // NOTE(review): these counter arrays are only ever incremented (+=,
    // ++, fetch-and-add below), which assumes my_malloc returns
    // zero-initialized memory -- confirm against its implementation.
    IT *flops_by_row_blockers = my_malloc<IT>(num_blockers);
    IT *flops_by_rows = my_malloc<IT>(A.rows);
    IT *nnz_by_row = my_malloc<IT>(A.rows);
    t_alloc += (omp_get_wtime() - t1);

    // --- symbolic phase: flop count per row blocker -----------------------
    t1 = omp_get_wtime();
    do_static_symbolic(A, B, 0, A.cols, nrows_per_blocker, num_blockers, flops_by_row_blockers, total_flop);
    t_symbolic += (omp_get_wtime() - t1);
    ttflops = total_flop;

    // --- allocate global (shared) and per-thread (local) bins -------------
    t1 = omp_get_wtime();
    IT *global_blocker_counters = my_malloc<IT>(num_blockers);
    TripleNode **global_blockers = my_malloc<TripleNode*>(num_blockers);
    IT **local_blocker_counters = my_malloc<IT*>(nthreads);
    TripleNode **local_blockers = my_malloc<TripleNode*>(nthreads);
    TripleNode **sorting_buffer = my_malloc<TripleNode*>(nthreads);
    IT *nnz_per_row_blocker = my_malloc<IT>(num_blockers);
    IT max_flops_in_row_blockers = *std::max_element(flops_by_row_blockers, flops_by_row_blockers + num_blockers);
    // NOTE(review): SIZE (16) stands in for sizeof(TripleNode) in every
    // allocation and memcpy below; this holds for tuple<int32_t, int32_t,
    // double> but deserves a static_assert.
#pragma omp parallel for
    for (IT blocker_id=0; blocker_id<num_blockers; ++blocker_id)
        global_blockers[blocker_id] = static_cast<TripleNode*>(::operator new(SIZE * flops_by_row_blockers[blocker_id]));
#pragma omp parallel
    {
        IT thread_id = omp_get_thread_num();
        local_blockers[thread_id] = static_cast<TripleNode*>(::operator new(SIZE * num_blockers * block_width));
        local_blocker_counters[thread_id] = my_malloc<IT>(num_blockers);
        sorting_buffer[thread_id] = static_cast<TripleNode*>(::operator new(SIZE * max_flops_in_row_blockers));
    }
    t_alloc += (omp_get_wtime() - t1);

    // --- PB1: compute partial products and bin them by row blocker --------
    t1 = omp_get_wtime();
#pragma omp parallel
    {
        IT thread_id = omp_get_thread_num();
        IT row_blocker_id;
        TripleNode *begin_local_blockers, *cur_local_blockers;
#pragma omp for nowait
        for (IT idx = startIdx; idx < endIdx; ++idx)
        {
            for (IT j = A.colptr[idx]; j < A.colptr[idx + 1]; ++j)
            {
                row_blocker_id = A.rowids[j] / nrows_per_blocker;
                begin_local_blockers = local_blockers[thread_id] + row_blocker_id * block_width;
                cur_local_blockers = begin_local_blockers + local_blocker_counters[thread_id][row_blocker_id];
                for (IT k = B.rowptr[idx]; k < B.rowptr[idx + 1]; ++k)
                {
                    std::get<0>(*cur_local_blockers) = A.rowids[j];
                    std::get<1>(*cur_local_blockers) = B.colids[k];
                    std::get<2>(*cur_local_blockers) = A.values[j] * B.values[k];
                    cur_local_blockers++;
                    // local bin full: flush it into the shared blocker,
                    // reserving the destination range atomically
                    if (cur_local_blockers == begin_local_blockers + block_width)
                    {
                        std::memcpy(
                            global_blockers[row_blocker_id] + __sync_fetch_and_add(&global_blocker_counters[row_blocker_id], block_width),
                            begin_local_blockers,
                            block_width * SIZE
                        );
                        cur_local_blockers = begin_local_blockers;
                    }
                }
                local_blocker_counters[thread_id][row_blocker_id] = cur_local_blockers - begin_local_blockers;
            }
        }
        // flush every partially-filled local bin of this thread
        for (IT row_blocker_id = 0; row_blocker_id < num_blockers; row_blocker_id++)
        {
            std::memcpy(
                global_blockers[row_blocker_id] + __sync_fetch_and_add(&global_blocker_counters[row_blocker_id], local_blocker_counters[thread_id][row_blocker_id]),
                local_blockers[thread_id] + row_blocker_id * block_width,
                local_blocker_counters[thread_id][row_blocker_id] * SIZE
            );
            local_blocker_counters[thread_id][row_blocker_id] = 0;
        }
    }
    t_pb1 += (omp_get_wtime() - t1);

    // --- sort each global blocker by its packed (row, col) key ------------
    t1 = omp_get_wtime();
#pragma omp parallel
    {
        IT thread_id = omp_get_thread_num();
#pragma omp for
        for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
        {
            doRadixSort(global_blockers[row_blocker_id],
                        global_blockers[row_blocker_id] + global_blocker_counters[row_blocker_id],
                        sorting_buffer[thread_id]);
        }
    }
    t_sort += (omp_get_wtime() - t1);

    // --- merge duplicate (row, col) entries within each blocker -----------
    t1 = omp_get_wtime();
#pragma omp parallel for
    for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
        nnz_per_row_blocker[row_blocker_id] += doMerge(global_blockers[row_blocker_id], global_blocker_counters[row_blocker_id]);
    t_merge += (omp_get_wtime() - t1);

    // --- assemble the CSR output ------------------------------------------
    IT *cumulative_row_indices = my_malloc<IT>(num_blockers + 1);
    scan(nnz_per_row_blocker, cumulative_row_indices, (IT)(num_blockers) + 1);
    IT total_nnz = cumulative_row_indices[num_blockers];
    // NOTE(review): resetting C only when it is ALREADY empty looks
    // inverted (presumably a non-empty C should be cleared) -- preserved
    // as-is; confirm intended semantics of isEmpty()/make_empty().
    if (C.isEmpty())
    {
        C.make_empty();
    }
    C.rows = A.rows;
    C.cols = B.cols;
    C.nnz = total_nnz;
    C.colids = static_cast<IT*>(::operator new(sizeof(IT[total_nnz])));
    C.rowptr = static_cast<IT*>(::operator new(sizeof(IT[C.rows+1])));
    C.values = static_cast<NT*>(::operator new(sizeof(NT[total_nnz])));
    C.rowptr[0] = 0;
    t1 = omp_get_wtime();
#pragma omp parallel for
    for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
    {
        IT base = cumulative_row_indices[row_blocker_id];
        for (IT i = 0; i < nnz_per_row_blocker[row_blocker_id]; ++i)
        {
            ++nnz_by_row[std::get<0>(global_blockers[row_blocker_id][i])];
            C.colids[base + i] = std::get<1>(global_blockers[row_blocker_id][i]);
            C.values[base + i] = std::get<2>(global_blockers[row_blocker_id][i]);
        }
    }
    t_convert += (omp_get_wtime() - t1);
    scan(nnz_by_row, C.rowptr, C.rows + 1);

    // --- release all scratch memory ---------------------------------------
    t1 = omp_get_wtime();
    my_free<IT>(flops_by_row_blockers);
    my_free<IT>(flops_by_rows);            // FIX: previously leaked
    my_free<IT>(nnz_by_row);
    my_free<IT>(nnz_per_row_blocker);
    my_free<IT>(global_blocker_counters);  // FIX: previously leaked
    my_free<IT>(cumulative_row_indices);
    for (IT row_blocker_id = 0; row_blocker_id < num_blockers; ++row_blocker_id)
    {
        my_free<TripleNode>(global_blockers[row_blocker_id]);
    }
    my_free<TripleNode*>(global_blockers);
    for (IT thread_id=0; thread_id<nthreads; ++thread_id)
    {
        my_free<TripleNode>(local_blockers[thread_id]);
        my_free<TripleNode>(sorting_buffer[thread_id]);  // FIX: previously leaked
        my_free<IT>(local_blocker_counters[thread_id]);
    }
    my_free<TripleNode*>(local_blockers);
    my_free<TripleNode*>(sorting_buffer);  // FIX: previously leaked
    my_free<IT*>(local_blocker_counters);
    t_free += (omp_get_wtime() - t1);
}
// Benchmark driver: run OuterSpGEMM_stage over all columns of A `niter`
// times, then report per-phase average times (milliseconds) plus derived
// bandwidth / GFLOPS figures for the compute (PB1) and sort phases.
template <typename IT, typename NT>
void OuterSpGEMM(const CSC<IT, NT>& A, const CSR<IT, NT>& B, CSR<IT, NT>& C, int nblockers, int nblockchars)
{
    double t_symbolic = 0;
    double t_pb1 = 0;
    double t_sort = 0;
    double t_merge = 0;
    double t_convert = 0;
    double t_alloc = 0;
    double t_free = 0;
    double ttflops = 0;
    int niter = 5;
    for (int i = 0; i < niter; ++i) {
        OuterSpGEMM_stage(A, B, 0, A.cols, C, t_symbolic, t_pb1, t_sort, t_merge, t_convert, t_alloc, t_free, nblockers, nblockchars, ttflops);
    }
    cout << "symbolic took " << t_symbolic / niter * 1000 << endl;
    cout << "PB1 took " << t_pb1 / niter * 1000 << endl;
    cout << "Sort took " << t_sort / niter * 1000 << endl;
    cout << "Merge took " << t_merge / niter * 1000 << endl;
    cout << "Convert took " << t_convert / niter * 1000 << endl;
    cout << "Alloc took " << t_alloc / niter * 1000 << endl;
    // BUG FIX: the free time was printed in seconds while every other
    // phase above is reported in milliseconds; scale it the same way.
    cout << "Free took " << t_free / niter * 1000 << endl << endl;
    double FLOPS_pb1 = ttflops / (1000000000 * (t_pb1 / niter));
    double bytes_pb1 = (A.nnz + B.nnz) * (sizeof(IT) + sizeof(NT)) + (A.cols + B.rows) * sizeof(IT) + 2 * ttflops * (2 * sizeof(IT) + sizeof(NT));
    double BW_pb1 = bytes_pb1 / (1000000000 * (t_pb1 / niter));
    double BW_sort = (9.0 * 16 * ttflops) / (1000000000 * (t_sort / niter));
    cout << "PB Bandwidth = " << BW_pb1 << " GB/s" << " GFLOPS = " << FLOPS_pb1 << " Data = " << bytes_pb1 / 1000000000 << " Total flops = " << ttflops << endl;
    cout << "Sort Bandwidth = " << BW_sort << " GB/s" << endl;
}
|
begin_declare_variant_messages.c | // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
#pragma omp begin // expected-error {{expected an OpenMP directive}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp begin declare // expected-error {{expected an OpenMP directive}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp begin variant // expected-error {{expected an OpenMP directive}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp variant begin // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant end // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp begin declare variant // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp end declare variant
// TODO: Issue an error message
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
#pragma omp end declare variant // expected-error {{'#pragma omp end declare variant' with no matching '#pragma omp begin declare variant'}}
int foo(void);
const int var;
#pragma omp begin declare variant // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp end declare variant
#pragma omp begin declare variant xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp end declare variant
#pragma omp begin declare variant match // expected-error {{expected '(' after 'match'}}
#pragma omp end declare variant
#pragma omp begin declare variant match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{extra tokens at the end of '#pragma omp begin declare variant' are ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{extra tokens at the end of '#pragma omp begin declare variant' are ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'isa' 'arch'}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(ibm) }) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<recovery-expr>()'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(foo()): cpu}) // expected-error {{expected ')'}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(foo()): cpu)) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{expected '}' after the context selectors for the context set "device"; '}' assumed}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(foo()): cpu)} // expected-error {{expected ')'}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-note {{to match this '('}}
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation = {vendor(score(foo) :llvm)})
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation = {vendor(score(foo()) :llvm)})
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp end declare variant
#pragma omp begin declare variant match(user = {condition(foo)})
#pragma omp end declare variant
#pragma omp begin declare variant match(user = {condition(foo())})
#pragma omp end declare variant
#pragma omp begin declare variant match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(&var): cpu)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('&var'); score ignored}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(var): cpu)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('var'); score ignored}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(foo): cpu)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo'); score ignored}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(foo()): cpu)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device = {kind(score(<expr>): cpu)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(cpu)})
static int defined_twice_a(void) { // expected-note {{previous definition is here}}
return 0;
}
int defined_twice_b(void) { // expected-note {{previous definition is here}}
return 0;
}
inline int defined_twice_c(void) { // expected-note {{previous definition is here}}
return 0;
}
#pragma omp end declare variant
#pragma omp begin declare variant match(device={kind(cpu)})
static int defined_twice_a(void) { // expected-error {{redefinition of 'defined_twice_a[device={kind(cpu)}]'}}
return 1;
}
int defined_twice_b(void) { // expected-error {{redefinition of 'defined_twice_b[device={kind(cpu)}]'}}
return 1;
}
inline int defined_twice_c(void) { // expected-error {{redefinition of 'defined_twice_c[device={kind(cpu)}]'}}
return 1;
}
#pragma omp end declare variant
// TODO: Issue an error message
#pragma omp begin declare variant match(device={kind(cpu)})
// The matching end is missing. Since the device clause is matching we will
// emit and error.
int also_before(void) {
return 0;
}
#pragma omp begin declare variant match(device={kind(gpu)}) // expected-note {{to match this '#pragma omp begin declare variant'}}
// The matching end is missing. Since the device clause is not matching we will
// cause us to elide the rest of the file and emit and error.
int also_after(void) {
return 2;
}
int also_before(void) {
return 2;
}
#pragma omp begin declare variant match(device={kind(fpga)})
This text is never parsed!
#pragma omp end declare variant
This text is also not parsed! // expected-error {{expected '#pragma omp end declare variant'}}
|
dynamic_module_load.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-aarch64-unknown-linux-gnu -ldl && %libomptarget-run-aarch64-unknown-linux-gnu %t.so 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64le-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64le-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-x86_64-pc-linux-gnu -ldl && %libomptarget-run-x86_64-pc-linux-gnu %t.so 2>&1 | %fcheck-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-nvptx64-nvidia-cuda -ldl && %libomptarget-run-nvptx64-nvidia-cuda %t.so 2>&1 | %fcheck-nvptx64-nvidia-cuda
// [BOLT] It takes too long time. Let's remove this from tests.
// UNSUPPORTED: clang-11
#ifdef SHARED
#include <stdio.h>
int foo() {
#pragma omp target
;
printf("%s\n", "DONE.");
return 0;
}
#else
#include <dlfcn.h>
#include <stdio.h>
int main(int argc, char **argv) {
void *Handle = dlopen(argv[1], RTLD_NOW);
int (*Foo)(void);
if (Handle == NULL) {
printf("dlopen() failed: %s\n", dlerror());
return 1;
}
Foo = (int (*)(void)) dlsym(Handle, "foo");
if (Handle == NULL) {
printf("dlsym() failed: %s\n", dlerror());
return 1;
}
// CHECK: DONE.
// CHECK-NOT: {{abort|fault}}
return Foo();
}
#endif
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
struct softmax_fwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float a, AType b) {
return AType(expf(a)/b);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double a, AType b) {
return AType(exp(a)/b);
}
};
struct log_softmax_fwd {
template<typename DType>
MSHADOW_XINLINE static float Map(DType a, float b) {
return a - logf(b);
}
template<typename DType>
MSHADOW_XINLINE static double Map(DType a, double b) {
return a - log(b);
}
};
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length == nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < M; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < M; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t len = static_cast<index_t>(length[i]);
index_t base = unravel_dot(i, sshape, stride);
DType mmax = negate ? -in[base] : in[base];
DType val;
for (index_t j = 1; j < len; ++j) {
val = negate ? -in[base + j*sa] : in[base + j*sa];
if (mmax < val) mmax = val;
}
for (index_t j = len; j < M; ++j) {
out[base + j*sa] = OType(0.0f);
}
AType sum = AType(0);
DType in_val;
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
if (temperature == 1.0) {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp(in_val - mmax);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map(in_val - mmax, sum);
}
} else {
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
sum += std::exp((in_val - mmax)/temperature);
}
for (index_t j = 0; j < len; ++j) {
in_val = negate ? -in[base + j*sa] : in[base + j*sa];
out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
}
}
}
}
}
struct softmax_bwd {
template<typename DType, typename AType>
MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
return AType(out * (ograd - sum));
}
};
struct log_softmax_bwd {
template<typename AType>
MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
return AType(ograd - expf(out)*sum);
}
template<typename AType>
MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
return AType(ograd - exp(out)*sum);
}
};
template<typename OP1, typename OP2, int Req, bool negate,
typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
DType *igrad, IType *length, Shape<ndim> shape,
int axis, const DType temperature) {
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
index_t sa = stride[axis];
if (length != nullptr) {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
index_t len = static_cast<index_t>(length[i]);
AType sum = AType(0);
for (index_t j = 0; j < len; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
final_result = (j < len) ? final_result : DType(0.0f);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
} else {
#pragma omp parallel for
for (index_t i = 0; i < N; ++i) {
index_t base = unravel_dot(i, sshape, stride);
AType sum = AType(0);
for (index_t j = 0; j < M; ++j) {
sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
}
// By default temperature is 1.0.
// Adding a branch here to save the CPU 'divide-by-1' computation at runtime
DType final_result;
if (temperature == 1.0) {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
} else {
for (index_t j = 0; j < M; ++j) {
final_result = negate ?
-OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
}
}
}
}
}
#ifdef __CUDACC__
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
index_t M, int axis, Shape<ndim> sshape,
Shape<ndim> stride, const double temperature) {
const unsigned x_size = 1 << x_bits;
__shared__ AType smem[x_size];
index_t sa = stride[axis];
index_t base = unravel_dot(blockIdx.x, sshape, stride);
index_t x = threadIdx.x;
const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
red::maximum::SetInitValue(smem[x]);
for (index_t i = x; i < len; i += x_size) {
smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
}
__syncthreads();
cuda::Reduce1D<red::maximum, x_bits>(smem);
__syncthreads();
DType smax = smem[0];
__syncthreads();
red::sum::SetInitValue(smem[x]);
DType val;
for (index_t i = x; i < len; i += x_size) {
val = negate ? -in[base + i*sa]:in[base + i*sa];
smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
__syncthreads();
cuda::Reduce1D<red::sum, x_bits>(smem);
__syncthreads();
AType ssum = smem[0];
__syncthreads();
for (index_t i = x; i < M; i += x_size) {
val = negate ? -in[base + i*sa] : in[base + i*sa];
out[base + i*sa] =
(i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
}
}
const int softmax_threads_per_block = 512;
template<typename OP, bool negate, typename AType, typename LType,
typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
const index_t M, const double temperature,
const int rows_per_block, const index_t total_rows) {
__shared__ AType scratch[softmax_threads_per_block];
__shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
const int warp_size = 32;
const int threads_per_row = softmax_threads_per_block / rows_per_block;
const int my_local_row = threadIdx.x / threads_per_row;
const int my_row = blockIdx.x * rows_per_block + my_local_row;
if (my_row >= total_rows) return;
const int my_id = threadIdx.x % threads_per_row;
const int entries_per_load = sizeof(LType)/sizeof(DType);
const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
// Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
// kernels where sizeof(LType) may be less than sizeof(DType),
// resulting in entries_per_load being 0.
// This is not a valid combination and is being checked against
// in the launcher code. This switch here is just to silence
// the division by zero warning generated for such invalid cases.
const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
const LType* in_aligned = reinterpret_cast<const LType*>(in);
size_t base = my_row * row_length;
for (index_t i = my_id; i < row_length; i += threads_per_row) {
persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
}
DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
__syncthreads();
DType my_max_value;
red::maximum::SetInitValue(my_max_value);
for (index_t i = my_id; i < len; i += threads_per_row) {
my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
}
scratch[threadIdx.x] = my_max_value;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return ::max(x, y); });
scratch[threadIdx.x] = my_value;
}
__syncthreads();
DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
AType my_sum;
red::sum::SetInitValue(my_sum);
for (index_t i = my_id; i < len; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
}
scratch[threadIdx.x] = my_sum;
__syncthreads();
for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
if (my_id < size) {
scratch[threadIdx.x] += scratch[threadIdx.x + size];
}
__syncthreads();
}
if (my_id < warp_size) {
AType my_value = warp_reduce(scratch[threadIdx.x],
[](AType x, AType y) { return x + y;});
scratch[threadIdx.x] = my_value;
}
__syncthreads();
AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
__syncthreads();
for (index_t i = my_id; i < M; i += threads_per_row) {
const DType val = negate ? -row[i] : row[i];
row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
DType(0.0f);
}
__syncthreads();
LType* out_aligned = reinterpret_cast<LType*>(out);
for (index_t i = my_id; i < row_length; i += threads_per_row) {
out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
}
}
template<typename OP, bool negate, typename AType, typename DType, typename OType,
typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
Shape<ndim> shape, int axis, const double temperature) {
const int x_bits = 7;
const int x_size = 1 << x_bits;
index_t M = shape[axis];
index_t N = shape.Size()/M;
Shape<ndim> stride = calc_stride(shape);
Shape<ndim> sshape = shape;
sshape[axis] = 1;
const size_t DSize = sizeof(DType);
// Using 20 kB of shared memory for persistent storage in the optimized case
const size_t max_opt_M = 20 * 1024 / DSize;
if (stride[axis] == 1 &&
static_cast<size_t>(M) <= max_opt_M &&
std::is_same<DType, OType>::value) {
int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
sizeof(DType) / sizeof(LType),
softmax_threads_per_block);
int nblocks = (N + rows_per_block - 1) / rows_per_block;
CHECK_LE(sizeof(DType), sizeof(LType));
softmax_stride1_compute_kernel<OP, negate, AType, LType>
<<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, temperature, rows_per_block, N);
});
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
} else {
softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
<<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
in, out, length, M, axis, sshape, stride, temperature);
MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
}
}
// Backward softmax kernel specialized for the case where the softmax axis is
// the innermost, contiguous one (stride == 1). Each CUDA block handles
// `rows_per_block` rows; each row is served by a group of
// threads_per_row = softmax_threads_per_block / rows_per_block threads.
// Both `out` and `ograd` for a row are staged into shared memory using wide
// vectorized LType loads; the launcher guarantees sizeof(DType) <= sizeof(LType).
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                            DType *igrad, const IType *length,
                                            const index_t M,
                                            const double temperature,
                                            const int rows_per_block,
                                            const index_t total_rows) {
  // Per-thread partial sums for the per-row reduction.
  __shared__ AType scratch[softmax_threads_per_block];
  // 20 kB staging buffer. For each local row, the first M elements hold `out`
  // and the next M elements hold `ograd` (see the copies below).
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  // NOTE(review): threads of out-of-range rows exit before the __syncthreads()
  // calls below; this relies on whole row-groups exiting together — confirm.
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  // Per-row valid length when a length mask is given; entries past `len`
  // receive a zero gradient below.
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  size_t base = my_row * row_length;
  // Stage out (first half) and ograd (second half) into shared memory.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  // DType view of this row's slot: row[0..M-1] = out, row[M..2M-1] = ograd.
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  __syncthreads();
  // Per-row reduction: sum of OP1::Map(ograd_i, out_i) over the valid length.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  // Tree reduction down to one warp, then a warp-level shuffle reduction.
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  // Broadcast the row sum from the first thread of this row's group.
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Compute the input gradient in place over the staged `out` half.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val =
      negate ?
      -OP2::Map(row[i + M], row[i], ssum) :
      OP2::Map(row[i + M], row[i], ssum);
    // Entries beyond the valid length get a zero gradient.
    row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
                         DType(0.0f);
    if (Req == kAddTo) {
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  // Write the result back with vectorized stores.
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}
// Generic backward softmax kernel for an arbitrary (possibly strided) axis.
// One CUDA block per output row (N blocks); x_size = 1 << x_bits threads
// cooperate on the M elements along the softmax axis with stride sa.
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                    const IType *length, index_t M, int axis,
                                    Shape<ndim> sshape, Shape<ndim> stride,
                                    const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  // Linear offset of this row's first element in the strided layout.
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  // Optional per-row valid length; gradient is zeroed past it.
  index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
  // Reduce sum_i OP1::Map(ograd_i, out_i) over the valid entries of the row.
  red::sum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Element-wise gradient, scaled by 1/temperature, masked past `len`.
  DType final_result;
  for (index_t i = x; i < M; i += x_size) {
    final_result =
      negate ?
      -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
      OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
    final_result = (i < len) ? final_result : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
  }
}
// GPU launcher for the softmax backward pass. Dispatches to the optimized
// stride-1 kernel when the softmax axis is contiguous, both out and ograd fit
// in the 20 kB shared-memory staging area, and DType == OType; otherwise
// falls back to the generic strided kernel.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape, int axis,
                        const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;          // 128 threads per block (generic path)
  index_t M = shape[axis];                 // length of the softmax axis
  index_t N = shape.Size()/M;              // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  // Need to store both out and ograd, so M can be only half compared to
  // forward pass.
  const size_t max_opt_M = 20 * 1024 / DSize / 2;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    // Pick the widest load type the row size allows for vectorized access.
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // The kernel assumes each LType holds >= 1 DType entries.
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
  } else {
    softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
      out, ograd, igrad, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
  }
}
#endif
} // namespace mxnet_op
// Operator parameters shared by the softmax family of operators.
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;                            // axis along which softmax is computed
  dmlc::optional<double> temperature;  // optional temperature scaling
  dmlc::optional<int> dtype;           // optional output-dtype override
  dmlc::optional<bool> use_length;     // whether a length input masks the data
  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }
  // Field-wise equality; required so the param can serve as a hash-map key
  // (see the std::hash specialization at the end of this file).
  bool operator==(const SoftmaxParam& other) const {
    return this->axis == other.axis &&
           this->temperature == other.temperature &&
           this->dtype == other.dtype &&
           this->use_length == other.use_length;
  }
};
// True when the user explicitly requested a valid output dtype via the
// `dtype` parameter (i.e. dtype was set and is not the "unknown" marker -1).
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& p = nnvm::get<SoftmaxParam>(attrs.parsed);
  if (!p.dtype.has_value()) {
    return false;
  }
  return p.dtype.value() != -1;
}
// Whether this softmax node consumes a length input that masks the data.
// `use_length` always holds a value because it defaults to false.
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  return nnvm::get<SoftmaxParam>(attrs.parsed).use_length.value();
}
// Type inference for the forward softmax op.
// With an explicit dtype override, the output is forced to that dtype and the
// data input's dtype is propagated from the output; otherwise the data input
// and the output must share one dtype (the optional length input is ignored
// here — its type is handled by the int32/int64 switch at compute time).
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    // No override: unify only the data input with the output.
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}
// Shape inference for the forward softmax op.
// When use_length is set, the length input must have the data's shape with
// the softmax axis removed (one length per row).
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    mxnet::TShape& dshape = in_attrs->at(0);
    // Length shape = data shape minus the softmax axis (scalar data keeps 1 dim).
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    // NOTE(review): only axis == -1 is normalized here; other negative axes
    // appear to be left as-is — confirm callers pass axis >= -1.
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  // Data and output are elementwise-shape-compatible.
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
// Shape inference for the softmax gradient op. The input layout depends on
// the node configuration (see SoftmaxGradOpInputNames):
//   plain:         {ograd, output}            -> {dgrad}
//   dtype override:{ograd, data, output}      -> {dgrad}
//   use_length:    {ograd, data, length, output} -> {dgrad, lgrad}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // Unify ograd/data/output with dgrad, skipping the length input (idx 2).
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      // The length gradient mirrors the length input's shape.
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}
// Type inference for the softmax gradient op.
// Input layout depends on configuration (see SoftmaxGradOpInputNames):
//   plain:          {ograd, output}               -> {dgrad}
//   dtype override: {ograd, data, output}         -> {dgrad}
//   use_length:     {ograd, data, length, output} -> {dgrad, lgrad}
// Returns true when all relevant dtypes are known (!= -1).
//
// Fix: the original unconditionally read (*out_attrs)[1] and (*in_attrs)[1]
// in the final check, but out_attrs has size 1 when only the dtype override
// (without use_length) is active — an out-of-bounds vector access (UB).
// The index-1 checks are now applied only when those slots exist.
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    int in_dtype = (*in_attrs)[1];                               // data dtype
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];  // output dtype
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);   // ograd matches output
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);   // dgrad matches data
    bool known = (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
                 (*in_attrs)[1] != -1;
    if (softmax_use_length(attrs)) {
      // lgrad matches the length input's dtype.
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
      known = known && (*out_attrs)[1] != -1;
    }
    return known;
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}
// In-place sharing options between gradient inputs and outputs, matching the
// input layouts produced by SoftmaxGradOpInputNames.
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  const bool use_length = softmax_use_length(attrs);
  if (!(softmax_has_dtype_override(attrs) || use_length)) {
    // {ograd, output} -> {dgrad}
    return {{0, 0}, {1, 0}};
  }
  if (use_length) {
    // {ograd, data, length, output} -> {dgrad, lgrad}
    return {{0, 0}, {1, 0}, {2, 1}, {3, 0}};
  }
  // {ograd, data, output} -> {dgrad}
  return {{0, 0}, {1, 0}, {2, 0}};
}
// Number of inputs the gradient op takes for this node configuration:
// 4 with use_length, 3 with a dtype override, otherwise 2.
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  const bool use_length = softmax_use_length(attrs);
  if (!(softmax_has_dtype_override(attrs) || use_length)) {
    return 2;
  }
  return use_length ? 4 : 3;
}
// Names of the gradient op's inputs, in the order they are consumed.
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  const bool use_length = softmax_use_length(attrs);
  if (!(softmax_has_dtype_override(attrs) || use_length)) {
    return {"ograd", "output"};
  }
  if (use_length) {
    return {"ograd", "data", "length", "output"};
  }
  return {"ograd", "data", "output"};
}
// Gradient-node builder for softmax ops. When the backward pass needs the
// forward inputs (dtype override or length masking), the gradient uses both
// inputs and outputs; otherwise the output alone suffices.
struct SoftmaxFGradient {
  const char *op_name;  // registered name of the backward operator
  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    } else {
      return ElemwiseGradUseOut {op_name}(n, ograds);
    }
  }
};
// Forward softmax entry point. Dispatches on input/output/length dtypes and
// on MXNET_SAFE_ACCUMULATION (which selects a wider accumulation type AType),
// then calls the 2-D or 3-D Softmax kernel on a compacted shape.
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp) return;
  // kAddTo is not supported by the softmax kernels.
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  // Collapse the shape around `axis` so the kernels only see 2-D or 3-D.
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      int type = kInt32;  // dummy length dtype when no mask input is given
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        if (safe_acc) {
          // Accumulate in the wider AType for numerical safety.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
                axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
                axis, static_cast<DType>(temperature));
          }
        } else {
          // Accumulate in the input dtype (faster, less accurate for fp16).
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
                axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
                ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
                outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
                axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}
// Backward softmax entry point. Zeroes the length gradient (the mask is not
// differentiable), then dispatches by dtype / request type / accumulation
// policy to the 2-D or 3-D SoftmaxGrad kernel.
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    // The length input carries no gradient; write zeros if requested.
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
          ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp) return;
  const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  // Collapse the shape around `axis` so the kernels only see 2-D or 3-D.
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  // Index of the forward output among the inputs (see SoftmaxGradOpInputNames).
  int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType * length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          if (safe_acc) {
            // Accumulate in the wider AType for numerical safety.
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<2>(), axis,
                  static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<3>(), axis,
                  static_cast<DType>(temperature));
            }
          } else {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<2>(), axis,
                  static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                  ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                  inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                  length_ptr, shape.get<3>(), axis,
                  static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}
} // namespace op
} // namespace mxnet
namespace std {
// Hash specialization so SoftmaxParam can key unordered containers.
// Fix: the call operator is now const-qualified, as required by the C++
// Hash named requirement — a non-const operator() is not callable through
// the const hash object stored inside std::unordered_map/set.
template<>
struct hash<mxnet::op::SoftmaxParam> {
  size_t operator()(const mxnet::op::SoftmaxParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    ret = dmlc::HashCombine(ret, val.temperature);
    ret = dmlc::HashCombine(ret, val.dtype);
    ret = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (e.g., X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/client.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/semaphore.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
/* Return the larger of two size_t values. */
static inline size_t MagickMax(const size_t x,
  const size_t y)
{
  return(x < y ? y : x);
}
/* Return the smaller of two size_t values. */
static inline size_t MagickMin(const size_t x,
  const size_t y)
{
  return(x > y ? y : x);
}
/* Allocate (or resize) the image colormap and initialize it to a linear
   gray ramp; switches the image to PseudoClass storage on success. */
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Clamp to at least 2 entries; also prevents division by zero in the
     gray-ramp computation below (image->colors-1). */
  image->colors=MagickMax(colors,2);
  if (image->colormap == (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelPacket *) ResizeQuantumMemory(image->colormap,
      image->colors,sizeof(*image->colormap));
  if (image->colormap == (PixelPacket *) NULL)
    {
      /* Allocation failed: fall back to DirectClass with no colormap. */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /* Fill the colormap with an evenly spaced, opaque gray ramp. */
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    size_t
      pixel;

    pixel=(size_t) (i*(QuantumRange/(image->colors-1)));
    image->colormap[i].red=(Quantum) pixel;
    image->colormap[i].green=(Quantum) pixel;
    image->colormap[i].blue=(Quantum) pixel;
    image->colormap[i].opacity=OpaqueOpacity;
  }
  return(SetImageStorageClass(image,PseudoClass));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
*/
/* Rotate every pixel's colormap index by `displace` positions (modulo the
   colormap size), updating both the index queue and the pixel RGB values. */
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A colormap is required; quantize DirectClass images first. */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType);
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* C's % can yield a negative remainder for negative displace;
         correct it into [0, colors). */
      index=(ssize_t) (GetPixelIndex(indexes+x)+displace) %
        image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* qsort comparator: orders PixelPackets by DECREASING intensity
   (color_2 minus color_1). */
static int IntensityCompare(const void *x,const void *y)
{
  const PixelPacket
    *color_1,
    *color_2;

  int
    intensity;

  color_1=(const PixelPacket *) x;
  color_2=(const PixelPacket *) y;
  /* NOTE(review): the difference is narrowed to int — assumes
     PixelPacketIntensity values fit so the subtraction cannot overflow;
     confirm against the Quantum depth in use. */
  intensity=PixelPacketIntensity(color_2)-(int) PixelPacketIntensity(color_1);
  return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Sort the colormap of a PseudoClass image by decreasing intensity and
   remap every pixel index to the sorted order. No-op for DirectClass. */
MagickExport MagickBooleanType SortColormapByIntensity(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries.
  */
  /* Stash each entry's original index in its opacity channel so the
     permutation can be recovered after sorting. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].opacity=(IndexPacket) i;
  /*
    Sort image colormap by decreasing color popularity.
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order.
  */
  /* pixels[old_index] = new_index, read back from the stashed opacity. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].opacity]=(unsigned short) i;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /* Remap every pixel's index (and RGB) to the sorted colormap. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register ssize_t
      x;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(IndexPacket) pixels[(ssize_t) GetPixelIndex(indexes+x)];
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
npdot.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <complex.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/*
* numpy.dot may call unoptimized blas
*/
/*
 * Double-precision GEMM wrapper: C = alpha * op(A) * op(B) + beta * C,
 * column-major with leading dimension ldc. Parallelizes the BLAS call over
 * k, m, or n depending on the matrix shape (k-parallel path accumulates
 * per-thread partial products into a private buffer, then merges them
 * under a critical section).
 *
 * Fix: `di` is now initialized to 0 in the k-parallel branch. It is read
 * in the `#pragma omp critical` section after the omp-for loop; a thread
 * that receives zero loop iterations would otherwise read an
 * uninitialized value (undefined behavior).
 */
void NPdgemm(const char trans_a, const char trans_b,
             const int m, const int n, const int k,
             const int lda, const int ldb, const int ldc,
             const int offseta, const int offsetb, const int offsetc,
             double *a, double *b, double *c,
             const double alpha, const double beta)
{
        const size_t dimc = ldc;
        int i, j;
        if (m == 0 || n == 0) {
                return;
        } else if (k == 0) {
                /* Degenerate contraction: the product block is zero. */
                for (i = 0; i < n; i++) {
                for (j = 0; j < m; j++) {
                        c[i*dimc+j] = 0;
                } }
                return;
        }
        a += offseta;
        b += offsetb;
        c += offsetc;

        if ((k/m) > 3 && (k/n) > 3) { // parallelize k
                /* Apply beta to C up front so each thread can call dgemm
                 * with beta=0 into its private buffer. */
                if (beta == 0) {
                        for (i = 0; i < n; i++) {
                                for (j = 0; j < m; j++) {
                                        c[i*dimc+j] = 0;
                                }
                        }
                } else {
                        for (i = 0; i < n; i++) {
                                for (j = 0; j < m; j++) {
                                        c[i*dimc+j] *= beta;
                                }
                        }
                }
#pragma omp parallel private(i, j)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((k+nthread-1) / nthread, 1);
                double D0 = 0;
                /* NOTE(review): allocation is unchecked; an OOM here would
                 * crash inside dgemm_ — consider an explicit check. */
                double *cpriv = malloc(sizeof(double) * (m*n+2));
                /* Must be initialized: read in the critical section below
                 * even by threads that got no omp-for iteration. */
                int di = 0;
                size_t ij;
                size_t astride = nblk;
                size_t bstride = nblk;
                /* Advance along k: stride depends on storage orientation. */
                if (trans_a == 'N') {
                        astride *= lda;
                }
                if (trans_b != 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, k-i*nblk);
                        if (di > 0) {
                                dgemm_(&trans_a, &trans_b, &m, &n, &di,
                                       &alpha, a+astride*i, &lda,
                                       b+bstride*i, &ldb,
                                       &D0, cpriv, &m);
                        }
                }
                /* Merge this thread's partial product into C. */
#pragma omp critical
                if (di > 0) {
                        for (ij = 0, i = 0; i < n; i++) {
                                for (j = 0; j < m; j++, ij++) {
                                        c[i*dimc+j] += cpriv[ij];
                                }
                        }
                }
                free(cpriv);
}
        } else if (m > n*2) { // parallelize m
#pragma omp parallel
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((m+nthread-1) / nthread, 1);
                nthread = (m+nblk-1) / nblk;
                int di;
                size_t bstride = nblk;
                if (trans_a != 'N') {
                        bstride *= lda;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, m-i*nblk);
                        if (di > 0) {
                                dgemm_(&trans_a, &trans_b, &di, &n, &k,
                                       &alpha, a+bstride*i, &lda, b, &ldb,
                                       &beta, c+i*nblk, &ldc);
                        }
                }
}
        } else { // parallelize n
#pragma omp parallel
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((n+nthread-1) / nthread, 1);
                nthread = (n+nblk-1) / nblk;
                int di;
                size_t bstride = nblk;
                size_t cstride = dimc * nblk;
                if (trans_b == 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, n-i*nblk);
                        if (di > 0) {
                                dgemm_(&trans_a, &trans_b, &m, &di, &k,
                                       &alpha, a, &lda, b+bstride*i, &ldb,
                                       &beta, c+cstride*i, &ldc);
                        }
                }
}
        }
}
/*
 * Complex double GEMM wrapper: C = alpha * op(A) * op(B) + beta * C,
 * column-major with leading dimension ldc; alpha/beta are passed by pointer
 * as zgemm_ expects. Parallelization mirrors NPdgemm.
 *
 * Fix: `di` is now initialized to 0 in the k-parallel branch. It is read
 * in the `#pragma omp critical` section after the omp-for loop; a thread
 * that receives zero loop iterations would otherwise read an
 * uninitialized value (undefined behavior).
 */
void NPzgemm(const char trans_a, const char trans_b,
             const int m, const int n, const int k,
             const int lda, const int ldb, const int ldc,
             const int offseta, const int offsetb, const int offsetc,
             double complex *a, double complex *b, double complex *c,
             const double complex *alpha, const double complex *beta)
{
        const size_t dimc = ldc;
        int i, j;
        if (m == 0 || n == 0) {
                return;
        } else if (k == 0) {
                /* Degenerate contraction: the product block is zero. */
                for (i = 0; i < n; i++) {
                for (j = 0; j < m; j++) {
                        c[i*dimc+j] = 0;
                } }
                return;
        }
        a += offseta;
        b += offsetb;
        c += offsetc;

        if ((k/m) > 3 && (k/n) > 3) { // parallelize k
                /* Apply beta to C up front so each thread can call zgemm
                 * with beta=0 into its private buffer. */
                if (creal(*beta) == 0 && cimag(*beta) == 0) {
                        for (i = 0; i < n; i++) {
                                for (j = 0; j < m; j++) {
                                        c[i*dimc+j] = 0;
                                }
                        }
                } else {
                        for (i = 0; i < n; i++) {
                                for (j = 0; j < m; j++) {
                                        c[i*dimc+j] *= beta[0];
                                }
                        }
                }
#pragma omp parallel private(i, j)
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((k+nthread-1) / nthread, 1);
                double complex Z0 = 0;
                /* NOTE(review): allocation is unchecked; an OOM here would
                 * crash inside zgemm_ — consider an explicit check. */
                double complex *cpriv = malloc(sizeof(double complex) * (m*n+2));
                /* Must be initialized: read in the critical section below
                 * even by threads that got no omp-for iteration. */
                int di = 0;
                size_t ij;
                size_t astride = nblk;
                size_t bstride = nblk;
                /* Advance along k: stride depends on storage orientation. */
                if (trans_a == 'N') {
                        astride *= lda;
                }
                if (trans_b != 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, k-i*nblk);
                        if (di > 0) {
                                zgemm_(&trans_a, &trans_b, &m, &n, &di,
                                       alpha, a+astride*i, &lda,
                                       b+bstride*i, &ldb,
                                       &Z0, cpriv, &m);
                        }
                }
                /* Merge this thread's partial product into C. */
#pragma omp critical
                if (di > 0) {
                        for (ij = 0, i = 0; i < n; i++) {
                                for (j = 0; j < m; j++, ij++) {
                                        c[i*dimc+j] += cpriv[ij];
                                }
                        }
                }
                free(cpriv);
}
        } else if (m > n*2) { // parallelize m
#pragma omp parallel
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((m+nthread-1) / nthread, 1);
                nthread = (m+nblk-1) / nblk;
                int di;
                size_t bstride = nblk;
                if (trans_a != 'N') {
                        bstride *= lda;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, m-i*nblk);
                        if (di > 0) {
                                zgemm_(&trans_a, &trans_b, &di, &n, &k,
                                       alpha, a+bstride*i, &lda, b, &ldb,
                                       beta, c+i*nblk, &ldc);
                        }
                }
}
        } else { // parallelize n
#pragma omp parallel
{
                int nthread = omp_get_num_threads();
                int nblk = MAX((n+nthread-1) / nthread, 1);
                nthread = (n+nblk-1) / nblk;
                int di;
                size_t bstride = nblk;
                size_t cstride = dimc * nblk;
                if (trans_b == 'N') {
                        bstride *= ldb;
                }
#pragma omp for
                for (i = 0; i < nthread; i++) {
                        di = MIN(nblk, n-i*nblk);
                        if (di > 0) {
                                zgemm_(&trans_a, &trans_b, &m, &di, &k,
                                       alpha, a, &lda, b+bstride*i, &ldb,
                                       beta, c+cstride*i, &ldc);
                        }
                }
}
        }
}
|
imd_main_risc_3d.c |
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2011 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/
/******************************************************************************
*
* imd_main_risc_3d.c -- main loop, risc specific part, three dimensions
*
******************************************************************************/
/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/
#include "imd.h"
/*****************************************************************************
*
* calc_forces
*
*****************************************************************************/
/*
 * calc_forces -- recompute per-atom forces for the current configuration.
 *
 * steps: current MD step number; in this routine it is only consulted to
 *        restrict the EWALD timer setup/stop to the first step (steps == 0).
 *
 * Phases: (1) clear global and per-atom accumulators, (2) walk all
 * interacting cell pairs and accumulate energies, virials and forces,
 * (3) optional EAM2 embedding pass, (4) optional covalent pass.
 */
void calc_forces(int steps)
{
int n, k;
/* clear global accumulation variables */
tot_pot_energy = 0.0;
virial = 0.0;
vir_xx = 0.0;
vir_yy = 0.0;
vir_zz = 0.0;
vir_yz = 0.0;
vir_zx = 0.0;
vir_xy = 0.0;
nfc++;
/* clear per atom accumulation variables */
/* Each iteration touches only cell k, so the loop is trivially parallel. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (k=0; k<ncells; ++k) {
int i;
cell *p;
p = cell_array + k;
for (i=0; i<p->n; ++i) {
KRAFT(p,i,X) = 0.0;
KRAFT(p,i,Y) = 0.0;
KRAFT(p,i,Z) = 0.0;
#ifdef UNIAX
DREH_MOMENT(p,i,X) = 0.0;
DREH_MOMENT(p,i,Y) = 0.0;
DREH_MOMENT(p,i,Z) = 0.0;
#endif
#if defined(STRESS_TENS)
PRESSTENS(p,i,xx) = 0.0;
PRESSTENS(p,i,yy) = 0.0;
PRESSTENS(p,i,zz) = 0.0;
PRESSTENS(p,i,yz) = 0.0;
PRESSTENS(p,i,zx) = 0.0;
PRESSTENS(p,i,xy) = 0.0;
#endif
#ifndef MONOLJ
POTENG(p,i) = 0.0;
#endif
#ifdef CNA
if (cna)
MARK(p,i) = 0;
#endif
#ifdef NNBR
NBANZ(p,i) = 0;
#endif
#ifdef COVALENT
NEIGH(p,i)->n = 0;
#endif
#ifdef EAM2
EAM_RHO(p,i) = 0.0; /* zero host electron density at atom site */
#ifdef EEAM
EAM_P(p,i) = 0.0; /* zero host electron density at atom site */
#endif
#endif
}
}
#ifdef RIGID
/* clear total forces */
if ( nsuperatoms>0 )
for(k=0; k<nsuperatoms; k++) {
superforce[k].x = 0.0;
superforce[k].y = 0.0;
superforce[k].z = 0.0;
}
#endif
#ifdef EWALD
if (steps==0) {
ewald_time.total = 0.0;
imd_start_timer( &ewald_time );
}
#endif
/* compute forces for all pairs of cells */
/* npairs[n] cell pairs in list n are independent within one list, so each
   list is processed with a parallel for; all scalar accumulators are
   combined via OpenMP reduction. */
for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
for (k=0; k<npairs[n]; ++k) {
vektor pbc;
pair *P;
P = pairs[n]+k;
/* periodic-image shift for this cell pair, built from the box vectors */
pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
do_forces(cell_array + P->np, cell_array + P->nq, pbc,
&tot_pot_energy, &virial, &vir_xx, &vir_yy, &vir_zz,
&vir_yz, &vir_zx, &vir_xy);
}
}
#ifdef EWALD
if (steps==0) {
imd_stop_timer( &ewald_time );
}
#endif
#ifdef EAM2
/* compute embedding energy and its derivative */
/* second pass over the same pair lists, using the densities gathered above */
do_embedding_energy();
for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
reduction(+:virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
for (k=0; k<npairs[n]; ++k) {
vektor pbc;
pair *P;
P = pairs[n]+k;
pbc.x = P->ipbc[0]*box_x.x + P->ipbc[1]*box_y.x + P->ipbc[2]*box_z.x;
pbc.y = P->ipbc[0]*box_x.y + P->ipbc[1]*box_y.y + P->ipbc[2]*box_z.y;
pbc.z = P->ipbc[0]*box_x.z + P->ipbc[1]*box_y.z + P->ipbc[2]*box_z.z;
do_forces_eam2(cell_array + P->np, cell_array + P->nq, pbc,
&virial, &vir_xx, &vir_yy, &vir_zz, &vir_yz, &vir_zx, &vir_xy);
}
}
#endif
#if defined(COVALENT) && !defined(CNA)
/* does not work correctly - different threads may write to same variables
#ifdef _OPENMP
#pragma omp parallel for schedule(runtime) \
reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
*/
/* deliberately serial: see the disabled pragma above for the reason */
for (k=0; k<ncells; ++k) {
do_forces2(cell_array+k, &tot_pot_energy, &virial,
&vir_xx, &vir_yy, &vir_zz, &vir_yz, &vir_zx, &vir_xy);
}
#endif
#ifdef EWALD
do_forces_ewald(steps);
#endif
}
/******************************************************************************
*
* fix_cells
*
* check if each atom is in the correct cell;
* move atoms that have left their cells
*
******************************************************************************/
/*
 * fix_cells -- re-sort atoms into the correct cells.
 *
 * After the positions have been advanced, apply the periodic boundary
 * conditions and then, for every cell in the bulk, move each atom whose
 * coordinates now map to a different cell into that cell.
 */
void fix_cells(void)
{
int i,j,k,l,clone;
cell *p, *q;
/* NOTE(review): lcoord is declared but never used here -- confirm it can
   be removed (kept for byte-compatibility). */
ivektor coord, lcoord;
/* apply periodic boundary conditions */
do_boundaries();
/* for each cell in bulk */
for (i=cellmin.x; i < cellmax.x; ++i)
for (j=cellmin.y; j < cellmax.y; ++j)
for (k=cellmin.z; k < cellmax.z; ++k) {
p = PTR_3D_V(cell_array, i, j, k, cell_dim);
/*printf(" cell %d %d %d \n",i,j,k);fflush(stdout);*/
/* loop over atoms in cell */
l=0;
while( l<p->n ) {
/* which cell does this atom's position map to now? */
coord = cell_coord( ORT(p,l,X), ORT(p,l,Y), ORT(p,l,Z) );
q = PTR_3D_VV(cell_array,coord,cell_dim);
/* if it's in the wrong cell, move it to the right cell */
if (p != q) {
MOVE_ATOM(q,p,l);
#ifdef CLONE
/* presumably MOVE_ATOM refills slot l from the end of the cell, so the
   clones of the moved atom follow it -- TODO confirm against MOVE_ATOM */
if (l < p->n-nclones)
for (clone=1; clone<nclones; clone++)
MOVE_ATOM(q, p, l+clone);
else /* we are dealing with the last in the stack */
for (clone=1; clone<nclones; clone++)
MOVE_ATOM(q, p, l);
#endif
}
/* only advance when nothing was moved into slot l */
else ++l;
}
}
}
|
nbody_parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
#include <omp.h>
#define DIM 2 /* Two-dimensional system */
#define X 0 /* x-coordinate subscript */
#define Y 1 /* y-coordinate subscript */
const double G = 6.673e-11;
typedef double vect_t[DIM]; /* Vector type for position, etc. */
// vect_t forces_reduction[4999][5000];
struct particle_s
{
double m; /* Mass */
vect_t s; /* Position */
vect_t v; /* Velocity */
};
void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p);
/*
 * Driver for the 2-D n-body simulation.
 *
 * Command line (see Usage): particle count, timestep count, timestep
 * size, output frequency, and 'g'/'i' for generated vs. stdin initial
 * conditions.  Prints the system state plus kinetic/potential energy
 * before and after the run, and the elapsed wall-clock time.
 */
int main(int argc, char *argv[])
{
    int n;                   /* Number of particles */
    int n_steps;             /* Number of timesteps */
    int step;                /* Current step */
    int part;                /* Current particle */
    int output_freq;         /* Frequency of output (parsed; unused below) */
    double delta_t;          /* Size of timestep */
    double t = 0.0;          /* Current time; initialized so the final
                                Output_state is well-defined when
                                n_steps == 0 (was read uninitialized) */
    struct particle_s *curr; /* Current state of system */
    vect_t *forces;          /* Forces on each particle */
    char g_i;                /*_G_en or _i_nput init conds */
    double kinetic_energy, potential_energy;
    double start, finish;    /* For timings */

    Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
    curr = malloc(n * sizeof(struct particle_s));
    forces = malloc(n * sizeof(vect_t));
    /* Previously unchecked: a failed allocation crashed later on NULL. */
    if (curr == NULL || forces == NULL)
    {
        fprintf(stderr, "Error: failed to allocate particle state\n");
        free(curr);
        free(forces);
        return 1;
    }
    if (g_i == 'i')
        Get_init_cond(curr, n);
    else
        Gen_init_cond(curr, n);

    GET_TIME(start);
    Compute_energy(curr, n, &kinetic_energy, &potential_energy);
    printf(" PE = %e, KE = %e, Total Energy = %e\n",
           potential_energy, kinetic_energy, kinetic_energy + potential_energy);
    Output_state(0, curr, n);
    for (step = 1; step <= n_steps; step++)
    {
        /* Bug fix: was t = n_steps * delta_t (same final value, but wrong
           for any per-step use of t). */
        t = step * delta_t;
        /* Force phase: serial outer loop; Compute_force parallelizes
           internally over the partner particles. */
        memset(forces, 0, n * sizeof(vect_t));
        for (part = 0; part < n - 1; part++)
            Compute_force(part, forces, curr, n);
        /* Each particle update is independent, so this loop is parallel. */
#pragma omp parallel for
        for (part = 0; part < n; part++)
        {
            Update_part(part, forces, curr, n, delta_t);
        }
    }
    /* Only the final energies are reported, so compute them once here
       instead of overwriting the same two variables every iteration. */
    Compute_energy(curr, n, &kinetic_energy, &potential_energy);
    Output_state(t, curr, n);
    printf(" PE = %e, KE = %e, Total Energy = %e\n",
           potential_energy, kinetic_energy, kinetic_energy + potential_energy);
    GET_TIME(finish);
    printf("Elapsed time = %e seconds\n", finish - start);

    free(curr);
    free(forces);
    return 0;
} /* main */
/* Print the expected command line on stderr and terminate. */
void Usage(char *prog_name)
{
    fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n",
            prog_name);
    /* Remaining fixed lines emitted in one call; the bytes written are
       identical to the original per-line fprintf sequence. */
    fputs(" <size of timestep> <output frequency>\n"
          " <g|i>\n"
          " 'g': program should generate init conds\n"
          " 'i': program should get init conds from stdin\n",
          stderr);
    exit(0); /* note: exits with status 0, matching the original behavior */
} /* Usage */
/*
 * Parse and validate the five command-line arguments into the output
 * parameters.  Any problem (wrong count, non-positive sizes, unknown
 * init-condition flag) is reported via Usage(), which does not return.
 */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
              double *delta_t_p, int *output_freq_p, char *g_i_p)
{
    /* Exactly five user arguments are required. */
    if (argc != 6)
        Usage(argv[0]);

    *n_p = strtol(argv[1], NULL, 10);
    *n_steps_p = strtol(argv[2], NULL, 10);
    *delta_t_p = strtod(argv[3], NULL);
    *output_freq_p = strtol(argv[4], NULL, 10);
    *g_i_p = argv[5][0];

    /* Reject non-positive particle counts / timestep sizes, negative step
       counts, and an init flag other than 'g' or 'i'. */
    int bad_values = (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0);
    int bad_flag = (*g_i_p != 'g' && *g_i_p != 'i');
    if (bad_values || bad_flag)
        Usage(argv[0]);
} /* Get_args */
/*
 * Read mass, position and velocity for each of the n particles from
 * stdin.  Previously every scanf return value was ignored, so malformed
 * or truncated input silently left garbage in curr[]; now any failed
 * conversion aborts with a diagnostic.
 */
void Get_init_cond(struct particle_s curr[], int n)
{
    int part;
    printf("For each particle, enter (in order):\n");
    printf(" its mass, its x-coord, its y-coord, ");
    printf("its x-velocity, its y-velocity\n");
    for (part = 0; part < n; part++)
    {
        if (scanf("%lf", &curr[part].m) != 1 ||
            scanf("%lf", &curr[part].s[X]) != 1 ||
            scanf("%lf", &curr[part].s[Y]) != 1 ||
            scanf("%lf", &curr[part].v[X]) != 1 ||
            scanf("%lf", &curr[part].v[Y]) != 1)
        {
            fprintf(stderr, "Error: invalid initial condition input for particle %d\n",
                    part);
            exit(1);
        }
    }
} /* Get_init_cond */
/*
 * Generate deterministic initial conditions: identical masses spaced
 * `gap` apart along the x axis, zero x-velocity, and y-velocities of
 * magnitude `speed` with alternating sign.
 */
void Gen_init_cond(struct particle_s curr[], int n)
{
    int i;
    double mass = 5.0e24;
    double gap = 1.0e5;
    double speed = 3.0e4;
    /* Seed retained for bit-compatibility with the original, although no
       rand() calls follow in this routine. */
    srand(1);
#pragma omp parallel for
    for (i = 0; i < n; i++)
    {
        curr[i].m = mass;
        curr[i].s[X] = i * gap;
        curr[i].s[Y] = 0.0;
        curr[i].v[X] = 0.0;
        curr[i].v[Y] = (i % 2 == 0) ? speed : -speed;
    }
} /* Gen_init_cond */
/* Print the simulation time followed by one line per particle
   (index, position, velocity), then a blank line.  The single format
   string below produces exactly the same bytes as the original four
   printf calls per particle. */
void Output_state(double time, struct particle_s curr[], int n)
{
    int i;
    printf("%.2f\n", time);
    for (i = 0; i < n; i++)
        printf("%3d %10.3e  %10.3e  %10.3e  %10.3e\n",
               i, curr[i].s[X], curr[i].s[Y], curr[i].v[X], curr[i].v[Y]);
    printf("\n");
} /* Output_state */
/*
 * Accumulate into forces[] the gravitational interactions between
 * particle `part` and every particle with a larger index, exploiting
 * Newton's third law to update both partners at once.  Interactions
 * with lower-indexed particles were already added when those indices
 * were processed by the caller.
 */
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
                   int n)
{
    int j;
    double m_prod;
    vect_t f;
    double dist, dist_cubed, scale;
    double fx_sum = 0.0; /* force on `part`, accumulated via reduction */
    double fy_sum = 0.0;
#pragma omp parallel for private(f, dist, dist_cubed, m_prod, scale) reduction(+:fx_sum,fy_sum)
    for (j = part + 1; j < n; j++)
    {
        f[X] = curr[part].s[X] - curr[j].s[X];
        f[Y] = curr[part].s[Y] - curr[j].s[Y];
        dist = sqrt(f[X] * f[X] + f[Y] * f[Y]);
        dist_cubed = dist * dist * dist;
        m_prod = -G * curr[part].m * curr[j].m;
        scale = m_prod / dist_cubed;
        f[X] *= scale;
        f[Y] *= scale;
        fx_sum += f[X];
        fy_sum += f[Y];
        /* Each iteration owns index j exclusively, so these writes do not
           race across threads. */
        forces[j][X] -= f[X];
        forces[j][Y] -= f[Y];
    }
    forces[part][X] += fx_sum;
    forces[part][Y] += fy_sum;
} /* Compute_force */
/* Euler step for one particle: advance the position using the current
   velocity, then advance the velocity using the freshly computed force
   (acceleration = force / mass). */
void Update_part(int part, vect_t forces[], struct particle_s curr[],
                 int n, double delta_t)
{
    struct particle_s *p = &curr[part];
    double accel_scale = delta_t / p->m;

    p->s[X] += delta_t * p->v[X];
    p->s[Y] += delta_t * p->v[Y];
    p->v[X] += accel_scale * forces[part][X];
    p->v[Y] += accel_scale * forces[part][Y];
} /* Update_part */
/*
 * Compute the total kinetic energy (sum of 0.5*m*v^2) and potential
 * energy (sum over unique pairs of -G*m_i*m_j/dist) of the system,
 * returned through kin_en_p and pot_en_p.
 */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p)
{
int i, j;
vect_t diff;
double pe = 0.0, ke = 0.0;
double dist, speed_sqr;
#pragma omp parallel
{
/* Accumulate m*v^2 (twice the kinetic energy); halved in the single below.
   NOTE(review): the nowait appears safe because the second omp for ends
   with an implicit barrier before the single executes, by which point every
   thread has finished its share of this loop -- confirm against the OpenMP
   reduction-visibility rules. */
#pragma omp for reduction(+:ke) private(speed_sqr) nowait
for (i = 0; i < n; i++)
{
speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
ke += curr[i].m * speed_sqr;
}
/* Pairwise potential energy over unique (i, j) pairs with j > i. */
#pragma omp for private(j, dist, diff) reduction(+:pe)
for (i = 0; i < n - 1; i++)
{
for (j = i + 1; j < n; j++)
{
diff[X] = curr[i].s[X] - curr[j].s[X];
diff[Y] = curr[i].s[Y] - curr[j].s[Y];
dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
pe += -G * curr[i].m * curr[j].m / dist;
}
}
/* One thread converts the accumulated m*v^2 into kinetic energy. */
#pragma omp single
ke *= 0.5;
} // parallel
*kin_en_p = ke;
*pot_en_p = pe;
} /* Compute_energy */
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Store *x - *y in *result, normalizing first so that result->tv_usec
 * comes out non-negative.  NOTE: *y is modified during normalization.
 * Returns 1 if the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry any excess microseconds into y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for struct timeval operands and return 1
 * when the difference is negative, 0 otherwise.  The operands are
 * normalized in place first (NOTE: *y is modified) so the resulting
 * tv_usec field is never negative.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Step 1: if x has fewer microseconds than y, shift whole seconds
       from y's seconds field into its microseconds field. */
    if (x->tv_usec < y->tv_usec)
    {
        int sec_shift = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * sec_shift;
        y->tv_sec += sec_shift;
    }
    /* Step 2: if the microsecond gap exceeds one second, move the excess
       back into y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int sec_shift = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * sec_shift;
        y->tv_sec -= sec_shift;
    }
    /* The subtraction is now component-wise safe. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM37 Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM37_CLANG_AST_STMTOPENMP_H
#define LLVM37_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
// //
///////////////////////////////////////////////////////////////////////////////
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// \brief Kind of the directive.
OpenMPDirectiveKind Kind;
/// \brief Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// \brief Ending location of the directive.
SourceLocation EndLoc;
/// \brief Numbers of clauses.
const unsigned NumClauses;
/// \brief Number of child expressions/stmts.
const unsigned NumChildren;
/// \brief Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
/// \brief Get the clauses storage.
// Clause pointers live in trailing storage co-allocated with the derived
// directive object, ClausesOffset bytes past `this`.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// \brief Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
// Trailing clause storage starts at the first properly aligned slot
// after the derived object T.
ClausesOffset(llvm37::RoundUpToAlignment(sizeof(T),
llvm37::alignOf<OMPClause *>())) {}
/// \brief Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// \brief Set the associated statement for the directive.
///
/// /param S Associated statement.
///
// The associated statement always occupies child slot 0.
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// \brief Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only those declarations that meet some run-time
/// criteria.
template <class FilterPredicate> class filtered_clause_iterator {
protected:
ArrayRef<OMPClause *>::const_iterator Current;
ArrayRef<OMPClause *>::const_iterator End;
FilterPredicate Pred;
// Advance Current to the next clause accepted by Pred (or to End).
void SkipToNextClause() {
while (Current != End && !Pred(*Current))
++Current;
}
public:
typedef const OMPClause *value_type;
filtered_clause_iterator() : Current(), End() {}
filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
: Current(Arr.begin()), End(Arr.end()), Pred(std::move(Pred)) {
SkipToNextClause();
}
value_type operator*() const { return *Current; }
value_type operator->() const { return *Current; }
filtered_clause_iterator &operator++() {
++Current;
SkipToNextClause();
return *this;
}
filtered_clause_iterator operator++(int) {
filtered_clause_iterator tmp(*this);
++(*this);
return tmp;
}
// True when the iterator is exhausted.
bool operator!() { return Current == End; }
// True while more matching clauses remain.
explicit operator bool() { return Current != End; }
bool empty() const { return Current == End; }
};
template <typename Fn>
filtered_clause_iterator<Fn> getFilteredClauses(Fn &&fn) const {
return filtered_clause_iterator<Fn>(clauses(), std::move(fn));
}
// Predicate selecting clauses of a single OpenMPClauseKind.
struct ClauseKindFilter {
OpenMPClauseKind Kind;
bool operator()(const OMPClause *clause) const {
return clause->getClauseKind() == Kind;
}
};
filtered_clause_iterator<ClauseKindFilter>
getClausesOfKind(OpenMPClauseKind Kind) const {
return getFilteredClauses(ClauseKindFilter{Kind});
}
/// \brief Gets a single clause of the specified kind \a K associated with the
/// current directive iff there is only one clause of this kind (and assertion
/// is fired if there is more than one clause is associated with the
/// directive). Returns nullptr if no clause of kind \a K is associated with
/// the directive.
const OMPClause *getSingleClause(OpenMPClauseKind K) const;
/// \brief Returns starting location of directive kind.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns ending location of directive.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// \brief Returns specified clause.
///
/// \param i Number of clause.
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// \brief Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// \brief Returns statement associated with the directive.
Stmt *getAssociatedStmt() const {
assert(hasAssociatedStmt() && "no associated statement.");
return const_cast<Stmt *>(*child_begin());
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
// Children (stmts/exprs) follow the clause pointers in trailing storage;
// directives with no associated statement expose an empty range.
child_range children() {
if (!hasAssociatedStmt())
return child_range();
Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
return child_range(ChildStorage, ChildStorage + NumChildren);
}
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
///
// The trailing `1` reserves a single child slot for the associated
// (captured) statement of the parallel region.
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
// Used when deserializing: locations and clauses are filled in later.
explicit OMPParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are nesessary for all the loop directives, and
/// the next 7 are specific to the worksharing ones.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
InitOffset = 6,
IncOffset = 7,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 8,
// The following 7 exprs are used by worksharing loops only.
IsLastIterVariableOffset = 8,
LowerBoundVariableOffset = 9,
UpperBoundVariableOffset = 10,
StrideVariableOffset = 11,
EnsureUpperBoundOffset = 12,
NextLowerBoundOffset = 13,
NextUpperBoundOffset = 14,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for worksharing loop directives.
WorksharingEnd = 15,
};
/// \brief Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
Expr **Storage = reinterpret_cast<Expr **>(
&(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getInits() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
return isOpenMPWorksharingDirective(Kind) ? WorksharingEnd
: DefaultEnd;
}
/// \brief Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
4 * CollapsedNum; // Counters, Inits, Updates and Finals
}
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) {
*std::next(child_begin(), CondOffset) = Cond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
// The setters below are only valid for worksharing loop directives
// ('omp for' and friends, see getArraysOffset); the asserts guard
// against using them on plain 'omp simd'.
void setIsLastIterVariable(Expr *IL) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
// Bulk setters for the per-collapsed-loop expression arrays
// (counters/inits/updates/finals); defined out of line.
void setCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
  /// \brief Loop iteration variable.
  Expr *IterationVarRef;
  /// \brief Loop last iteration number.
  Expr *LastIteration;
  /// \brief Loop number of iterations.
  Expr *NumIterations;
  /// \brief Calculation of last iteration.
  Expr *CalcLastIteration;
  /// \brief Loop pre-condition.
  Expr *PreCond;
  /// \brief Loop condition.
  Expr *Cond;
  /// \brief Loop iteration variable init.
  Expr *Init;
  /// \brief Loop increment.
  Expr *Inc;
  /// \brief IsLastIteration - local flag variable passed to runtime.
  Expr *IL;
  /// \brief LowerBound - local variable passed to runtime.
  Expr *LB;
  /// \brief UpperBound - local variable passed to runtime.
  Expr *UB;
  /// \brief Stride - local variable passed to runtime.
  Expr *ST;
  /// \brief EnsureUpperBound -- expression LB = min(LB, NumIterations).
  Expr *EUB;
  /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
  Expr *NLB;
  /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
  Expr *NUB;
  /// \brief Loop counters.
  SmallVector<Expr *, 4> Counters;
  /// \brief Expressions for loop counters inits for CodeGen.
  SmallVector<Expr *, 4> Inits;
  /// \brief Expressions for loop counters update for CodeGen.
  SmallVector<Expr *, 4> Updates;
  /// \brief Final loop counter values for CodeGen.
  SmallVector<Expr *, 4> Finals;
  /// \brief Check if all the expressions are built (does not check the
  /// worksharing ones).
  bool builtAll() {
    return IterationVarRef != nullptr && LastIteration != nullptr &&
           NumIterations != nullptr && PreCond != nullptr &&
           Cond != nullptr && Init != nullptr && Inc != nullptr;
  }
  /// \brief Initialize all the fields to null.
  /// \param Size Number of elements in the counters/finals/updates arrays.
  void clear(unsigned Size) {
    IterationVarRef = nullptr;
    LastIteration = nullptr;
    CalcLastIteration = nullptr;
    // NumIterations must be reset too: builtAll() checks it, so leaving a
    // stale pointer here would make a cleared struct look built.
    NumIterations = nullptr;
    PreCond = nullptr;
    Cond = nullptr;
    Init = nullptr;
    Inc = nullptr;
    IL = nullptr;
    LB = nullptr;
    UB = nullptr;
    ST = nullptr;
    EUB = nullptr;
    NLB = nullptr;
    NUB = nullptr;
    // Resize each per-loop array to Size elements, all null.
    Counters.assign(Size, nullptr);
    Inits.assign(Size, nullptr);
    Updates.assign(Size, nullptr);
    Finals.assign(Size, nullptr);
  }
};
/// \brief Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }
// The helper expressions are stored as Stmt * in the trailing children
// array; each getter below recovers the Expr * at its fixed offset (the
// const_cast mirrors the non-const child storage).
Expr *getIterationVariable() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), IterationVariableOffset)));
}
Expr *getLastIteration() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), LastIterationOffset)));
}
Expr *getCalcLastIteration() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CalcLastIterationOffset)));
}
Expr *getPreCond() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PreConditionOffset)));
}
Expr *getCond() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
}
Expr *getInit() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
Expr *getInc() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
// The following accessors are only valid for worksharing loop directives
// (see the setters above); the asserts enforce this.
Expr *getIsLastIterVariable() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), IsLastIterVariableOffset)));
}
Expr *getLowerBoundVariable() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), LowerBoundVariableOffset)));
}
Expr *getUpperBoundVariable() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), UpperBoundVariableOffset)));
}
Expr *getStrideVariable() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), StrideVariableOffset)));
}
Expr *getEnsureUpperBound() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), EnsureUpperBoundOffset)));
}
Expr *getNextLowerBound() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NextLowerBoundOffset)));
}
Expr *getNextUpperBound() const {
  assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NextUpperBoundOffset)));
}
/// \brief Return the body of the innermost collapsed loop.
const Stmt *getBody() const {
  // This relies on the canonical loop form having already been checked by
  // Sema: each level must be a ForStmt, possibly wrapped in containers.
  Stmt *Child = getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
  Child = cast<ForStmt>(Child)->getBody();
  // Descend through the remaining CollapsedNum - 1 nested loops.
  for (unsigned Level = CollapsedNum; Level > 1; --Level) {
    Child = Child->IgnoreContainers();
    Child = cast<ForStmt>(Child)->getBody();
  }
  return Child;
}
// Read-only views over the per-collapsed-loop expression arrays. The
// const overloads forward to the non-const getters via const_cast.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
  return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
  return const_cast<OMPLoopDirective *>(this)->getInits();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
  return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
  return const_cast<OMPLoopDirective *>(this)->getFinals();
}
/// \brief True iff \p T is one of the loop directive classes.
static bool classof(const Stmt *T) {
  switch (T->getStmtClass()) {
  case OMPSimdDirectiveClass:
  case OMPForDirectiveClass:
  case OMPForSimdDirectiveClass:
  case OMPParallelForDirectiveClass:
  case OMPParallelForSimdDirectiveClass:
    return true;
  default:
    return false;
  }
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt,
                                 const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      // Trailing 1: a single child, the associated statement.
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // 0 clauses; 1 child, the associated statement.
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      // Trailing 1: a single child, the associated statement.
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // 0 clauses; 1 child, the associated statement.
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Name of the directive.
  DeclarationNameInfo DirName;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      // 0 clauses; 1 child, the associated statement.
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, 0, 1),
        DirName(Name) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), 0, 1),
        DirName() {}
  /// \brief Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      // Trailing 1: a single child, the associated statement.
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      // Trailing 1: a single child, the associated statement.
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Constructors are private: instances are created through Create() and
  // CreateEmpty() below (or by the ASTStmtReader friend on deserialization).
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      // Stand-alone directive: 0 clauses and 0 children (no associated stmt).
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               StartLoc, EndLoc, 0, 0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               SourceLocation(), SourceLocation(), 0, 0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
// Trailing 0, 0: no clauses and no child sub-statements for this
// stand-alone directive.
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPBarrierDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
// Trailing 0, 0: no clauses and no child sub-statements for this
// stand-alone directive.
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPTaskwaitDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
// Trailing 0, 1: no clauses, one child slot — presumably the
// associated statement passed to Create() below.
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPTaskgroupDirective()
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTaskgroupDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPTaskgroupDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments: variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but has an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
// Trailing 0: no child sub-statements; the flushed variables live in
// the (pseudo) clause list, not in a child statement.
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses (only single OMPFlushClause clause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPFlushDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
// Trailing 0, 1: no clauses, one child slot — presumably the
// associated statement passed to Create() below.
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPOrderedDirective()
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPOrderedDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPOrderedDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
// AST reader needs the private members below, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// x = x binop expr;
/// x = expr binop x;
/// \endcode
/// This field is true for the first form of the expression and false for the
/// second. Required for correct codegen of non-associative operations (like
/// << or >>).
bool IsXLHSInRHSPart;
/// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
/// have atomic expressions of forms
/// \code
/// v = x; <update x>;
/// <update x>; v = x;
/// \endcode
/// This field is true for the first (postfix) form of the expression and false
/// otherwise.
bool IsPostfixUpdate;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
// Trailing 5: child slots for the sub-expressions. Per the accessors
// below, slot 1 = 'x', slot 2 = update expr, slot 3 = 'v',
// slot 4 = 'expr'; slot 0 is presumably the associated statement.
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
StartLoc, EndLoc, NumClauses, 5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPAtomicDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
SourceLocation(), SourceLocation(), NumClauses,
5),
IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
/// \brief Set 'x' part of the associated expression/statement (child slot 1).
void setX(Expr *X) { *std::next(child_begin()) = X; }
/// \brief Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)' (child slot 2).
void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
/// \brief Set 'v' part of the associated expression/statement (child slot 3).
void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
/// \brief Set 'expr' part of the associated expression/statement (child slot 4).
void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }
public:
/// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
/// detailed description of 'x', 'v' and 'expr').
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param X 'x' part of the associated expression/statement.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
/// \param UE Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
/// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
/// second.
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief Get 'x' part of the associated expression/statement.
Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
const Expr *getX() const {
return cast_or_null<Expr>(*std::next(child_begin()));
}
/// \brief Get helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *getUpdateExpr() {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
const Expr *getUpdateExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 2));
}
/// \brief Return true if helper update expression has form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
/// \brief Return true if 'v' expression must be updated to original value of
/// 'x', false if 'v' must be updated to the new value of 'x'.
bool isPostfixUpdate() const { return IsPostfixUpdate; }
/// \brief Get 'v' part of the associated expression/statement.
Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
const Expr *getV() const {
return cast_or_null<Expr>(*std::next(child_begin(), 3));
}
/// \brief Get 'expr' part of the associated expression/statement.
Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
const Expr *getExpr() const {
return cast_or_null<Expr>(*std::next(child_begin(), 4));
}
/// \brief RTTI support: true iff \a T is an OMPAtomicDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPAtomicDirectiveClass;
}
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
// Trailing 1: one child slot — presumably the associated statement
// passed to Create() below.
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPTargetDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDirectiveClass;
}
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
// AST reader needs the private constructors, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
// Trailing 1: one child slot — presumably the associated statement
// passed to Create() below.
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTeamsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// \brief RTTI support: true iff \a T is an OMPTeamsDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDirectiveClass;
}
};
/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
// AST reader needs the private members, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
// Kind of the enclosing region being cancelled (e.g. OMPD_for);
// OMPD_unknown until setCancelRegion() is called.
OpenMPDirectiveKind CancelRegion;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
// Trailing 0, 0: no clauses and no child sub-statements.
: OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
OMPD_cancellation_point, StartLoc, EndLoc, 0, 0),
CancelRegion(OMPD_unknown) {}
/// \brief Build an empty directive.
///
explicit OMPCancellationPointDirective()
: OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
OMPD_cancellation_point, SourceLocation(),
SourceLocation(), 0, 0),
CancelRegion(OMPD_unknown) {}
/// \brief Set cancel region for current cancellation point.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPCancellationPointDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
EmptyShell);
/// \brief Get cancellation region for the current cancellation point.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
/// \brief RTTI support: true iff \a T is an OMPCancellationPointDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancellationPointDirectiveClass;
}
};
/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
// AST reader needs the private members, presumably for
// deserialization — confirm against ASTStmtReader.
friend class ASTStmtReader;
// Kind of the enclosing region being cancelled (e.g. OMPD_for);
// OMPD_unknown until setCancelRegion() is called.
OpenMPDirectiveKind CancelRegion;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
// Trailing 0, 0: no clauses and no child sub-statements.
: OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
StartLoc, EndLoc, 0, 0),
CancelRegion(OMPD_unknown) {}
/// \brief Build an empty directive.
///
explicit OMPCancelDirective()
: OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
SourceLocation(), SourceLocation(), 0, 0),
CancelRegion(OMPD_unknown) {}
/// \brief Set cancel region for current cancellation point.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPCancelDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCancelDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Get cancellation region for the current cancellation point.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
/// \brief RTTI support: true iff \a T is an OMPCancelDirective.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancelDirectiveClass;
}
};
} // end namespace clang
#endif
|
XSHA512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2008,2011 by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_XSHA512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_XSHA512);
#else
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "rawSHA512_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 4096
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "xsha512"
#define FORMAT_NAME "Mac OS X 10.7"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 107
#define SALT_SIZE 4
#define SALT_ALIGN sizeof(uint32_t)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if ARCH_BITS >= 64 || defined(__SSE2__)
/* 64-bitness happens to correlate with faster memcpy() */
#define PRECOMPUTE_CTX_FOR_SALT
#else
#undef PRECOMPUTE_CTX_FOR_SALT
#endif
#define BINARY_SIZE DIGEST_SIZE
#ifdef SIMD_COEF_64
#define FMT_IS_64BIT
#define FMT_IS_BE
#include "common-simd-getpos.h"
static uint64_t (*saved_key)[SHA_BUF_SIZ*MAX_KEYS_PER_CRYPT];
static uint64_t (*crypt_out);
static int max_keys;
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static uint32_t (*crypt_out)[DIGEST_SIZE/sizeof(uint32_t)];
#ifdef PRECOMPUTE_CTX_FOR_SALT
static SHA512_CTX ctx_salt;
#else
static uint32_t saved_salt;
#endif
#endif
/*
 * Format initialization: scale the key-batch sizes by the OpenMP thread
 * count (times OMP_SCALE) and allocate the per-key buffers.  The SIMD
 * build uses SIMD-aligned interleaved buffers; the scalar build uses
 * plain per-key arrays plus a length array.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
#ifndef _OPENMP
int omp_t = 1;
#endif
saved_key = mem_calloc_align(omp_t, sizeof(*saved_key), MEM_ALIGN_SIMD);
/* 8 x uint64_t = one SHA-512 digest per key slot */
crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
8 * sizeof(uint64_t), MEM_ALIGN_SIMD);
max_keys = self->params.max_keys_per_crypt;
#else
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
#endif
}
/* Release everything allocated by init(); saved_len exists only in the
 * scalar (non-SIMD) build. */
static void done(void)
{
MEM_FREE(crypt_out);
#ifndef SIMD_COEF_64
MEM_FREE(saved_len);
#endif
MEM_FREE(saved_key);
}
/*
 * Decode the 4-byte binary salt from the hex digits that follow the
 * ciphertext tag.  Returns a pointer to a static buffer (the usual JtR
 * get_salt() contract: the caller copies SALT_SIZE bytes out before the
 * next call).  The union guarantees uint32_t alignment for salt_hash()
 * and set_salt(), which read the salt as a whole word.
 */
static void *get_salt(char *ciphertext)
{
static union {
unsigned char c[SALT_SIZE];
uint32_t dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
ciphertext += XSHA512_TAG_LENGTH;
p = ciphertext;
for (i = 0; i < sizeof(buf.c); i++) {
/* atoi16[] maps an ASCII hex digit to its value; two digits per byte */
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
#ifdef SIMD_COEF_64
/* Map key `index` to the first 64-bit word of its digest inside the
 * interleaved SIMD output buffer: lanes of SIMD_COEF_64 keys, 8 words
 * (one SHA-512 digest) per lane group. */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
/* Return progressively wider low bits of the first digest word, used by
 * the cracker's hash tables of increasing size. */
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
/* Scalar build: crypt_out is a plain 2-D array; word 0 of each digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
/* Same masks as get_hash_*, applied to the stored binary (first 64-bit
 * word of the target digest) so loaded hashes land in the same buckets. */
static int binary_hash_0 (void *p) { return *((uint64_t*)p) & PH_MASK_0; }
static int binary_hash_1 (void *p) { return *((uint64_t*)p) & PH_MASK_1; }
static int binary_hash_2 (void *p) { return *((uint64_t*)p) & PH_MASK_2; }
static int binary_hash_3 (void *p) { return *((uint64_t*)p) & PH_MASK_3; }
static int binary_hash_4 (void *p) { return *((uint64_t*)p) & PH_MASK_4; }
static int binary_hash_5 (void *p) { return *((uint64_t*)p) & PH_MASK_5; }
static int binary_hash_6 (void *p) { return *((uint64_t*)p) & PH_MASK_6; }
/*
 * Bucket a salt for the cracker's salt hash table: read the 4-byte salt
 * as one 32-bit word and keep the low bits that index the table.
 */
static int salt_hash(void *salt)
{
	uint32_t word = *(uint32_t *)salt;

	return word & (SALT_HASH_SIZE - 1);
}
/*
 * Install the current salt before crypt_all().
 * Scalar build: either precompute a SHA-512 context with the salt already
 * absorbed (PRECOMPUTE_CTX_FOR_SALT) or just remember the 4 salt bytes.
 * SIMD build: the salt is prepended to every key, so write its 4 bytes
 * directly into each key slot of the interleaved buffer via GETPOS().
 */
static void set_salt(void *salt)
{
#ifndef SIMD_COEF_64
#ifdef PRECOMPUTE_CTX_FOR_SALT
SHA512_Init(&ctx_salt);
SHA512_Update(&ctx_salt, salt, SALT_SIZE);
#else
saved_salt = *(uint32_t *)salt;
#endif
#else
int i;
unsigned char *wucp = (unsigned char*)saved_key;
for (i = 0; i < max_keys; ++i) {
wucp[GETPOS(0, i)] = ((char*)salt)[0];
wucp[GETPOS(1, i)] = ((char*)salt)[1];
wucp[GETPOS(2, i)] = ((char*)salt)[2];
wucp[GETPOS(3, i)] = ((char*)salt)[3];
}
#endif
}
#define SALT_PREPENDED SALT_SIZE
#define NON_SIMD_SET_SAVED_LEN
#include "common-simd-setkey64.h"
/*
 * Hash all queued keys against the current salt: SHA512(salt . key).
 * SIMD build processes MAX_KEYS_PER_CRYPT keys per SIMDSHA512body() call
 * (salt bytes were already placed in the buffers by set_salt()); scalar
 * build hashes one key per iteration, optionally cloning the
 * salt-preloaded context instead of re-absorbing the salt each time.
 * Parallelized over key batches with OpenMP when available.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#ifndef SIMD_COEF_64
#ifdef PRECOMPUTE_CTX_FOR_SALT
#pragma omp parallel for default(none) private(index) shared(ctx_salt, saved_key, saved_len, crypt_out)
#else
#pragma omp parallel for default(none) private(index) shared(saved_salt, saved_key, saved_len, crypt_out)
#endif
#else
#pragma omp parallel for
#endif
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SIMD_COEF_64
SIMDSHA512body(&saved_key[index/MAX_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN);
#else
SHA512_CTX ctx;
#ifdef PRECOMPUTE_CTX_FOR_SALT
/* resume from the context that already absorbed the salt */
memcpy(&ctx, &ctx_salt, sizeof(ctx));
#else
SHA512_Init(&ctx);
SHA512_Update(&ctx, &saved_salt, SALT_SIZE);
#endif
SHA512_Update(&ctx, saved_key[index], saved_len[index]);
SHA512_Final((unsigned char *)(crypt_out[index]), &ctx);
#endif
}
return count;
}
/* Quick reject: return nonzero iff the first word of `binary` matches the
 * first digest word of ANY of the `count` computed hashes.  Compares
 * 64 bits in the SIMD build, 32 bits in the scalar build. */
static int cmp_all(void *binary, int count)
{
unsigned int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
if (((uint64_t *) binary)[0] == crypt_out[HASH_IDX])
#else
if ( ((uint32_t*)binary)[0] == crypt_out[index][0] )
#endif
return 1;
return 0;
}
/* Full comparison of one candidate: all BINARY_SIZE bytes of the digest.
 * SIMD build walks the interleaved words (stride SIMD_COEF_64); scalar
 * build is a plain memcmp. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
int i;
for (i = 0; i < BINARY_SIZE/sizeof(uint64_t); i++)
if (((uint64_t*) binary)[i] != crypt_out[HASH_IDX + i*SIMD_COEF_64])
return 0;
return 1;
#else
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/*
 * Final exact check.  cmp_one() already compares the full digest, so
 * there is nothing further to verify; always report a match.
 */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;

	return 1;
}
/*
 * Format descriptor registered with the JtR core.  Field order follows
 * struct fmt_main (formats.h): a params sub-struct first, then the
 * methods table.  NULL entries use core defaults.
 */
struct fmt_main fmt_XSHA512 = {
{
/* format parameters */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
XSHA512_BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ XSHA512_FORMAT_TAG },
sha512_common_tests_xsha512
}, {
/* methods */
init,
done,
fmt_default_reset,
/* parsing helpers shared with the other SHA-512 formats */
sha512_common_prepare_xsha512,
sha512_common_valid_xsha512,
sha512_common_split_xsha512,
sha512_common_binary_xsha512,
get_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
GB_binop__rminus_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int16)
// A*D function (colscale): GB (_AxD__rminus_int16)
// D*A function (rowscale): GB (_DxB__rminus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int16)
// C=scalar+B GB (_bind1st__rminus_int16)
// C=scalar+B' GB (_bind1st_tran__rminus_int16)
// C=A+scalar GB (_bind2nd__rminus_int16)
// C=A'+scalar GB (_bind2nd_tran__rminus_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT16 || GxB_NO_RMINUS_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the actual loop lives in
// the included template, specialized by the GB_* macros above.
// NOTE: auto-generated file — regenerate from Generator/ rather than
// editing the code here.
void GB (_Cdense_ewise3_accum__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulate).  Returns
// GrB_NO_VALUE when this operator is compiled out via GB_DISABLE, which
// tells the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B has been pre-sliced into B_ntasks tasks (B_ek_slicing) for B_nthreads.
GrB_Info GB (_Cdense_accumB__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned; harmless
// redundancy produced by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array; filled in by the colscale template
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array; filled in by the rowscale template
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces, allocated inside the template and released below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for rminus (div(y,x) is rewritten as rdiv(x,y) and
// vice versa by the caller), so only the non-flipped branch is compiled.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For rminus with x bound first, each entry becomes z = rminus(x,bij) = bij-x.
GrB_Info GB (_bind1st__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    const int16_t x = (*((int16_t *) x_input)) ;
    const int16_t *Bx = (const int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // entries absent from the bitmap Bb are left untouched
        if (!GBB (Bb, p)) continue ;
        const int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For rminus with y bound second, each entry becomes z = rminus(aij,y) = y-aij.
GrB_Info GB (_bind2nd__rminus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    const int16_t *Ax = (const int16_t *) Ax_input ;
    const int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // entries absent from the bitmap Ab are left untouched
        if (!GBB (Ab, p)) continue ;
        const int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
example-omp.c | // PWR015: Avoid copying unnecessary array elements to the GPU
// https://www.appentra.com/knowledge/checks/pwr015
// Demonstrates the PWR015 violation: all 100 elements of A, B and sum are
// mapped to the target device, although the loop only touches indices
// [0, 50). This over-copying is intentional -- it is the defect this
// example file exists to illustrate; the fix would map A[0:50] etc.
void foo() {
int A[100], B[100], sum[100];
#pragma omp target map(to: A[0:100], B[0:100]) map(from: sum[0:100])
#pragma omp parallel for
for (int i = 0; i < 50; i++) {
sum[i] = A[i] + B[i];
}
}
|
matrix_mul_par_original.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
/*
Simple matrix multiplication example.
*/
/*
matrix multiplication
*/
/*
 * matrix_mult: dense matrix multiplication C = A * B with cache blocking.
 *
 * A is N x M, B is M x K, C is N x K; all matrices are row-major 1-D arrays.
 * C is fully overwritten (zeroed first, then accumulated).
 *
 * Fix: the outer block loops used size_t counters compared against the int
 * bounds M and K (signed/unsigned comparison) and assigned a size_t into the
 * int l_limit (narrowing). All counters are now int, consistent with the
 * matrix_mult_call_specialization variants in this file.
 */
void matrix_mult(double const * A, double const * B, double * C, int const N, int const M, int const K)
{
    int const BS2 = 64;   /* column (j) block size */
    int const BS1 = 64;   /* depth (l) block size */

    /* C = 0 */
    for (int ii = 0; ii < N; ii++)
    {
        for (int jj = 0; jj < K; jj++)
        {
            C[K * ii + jj] = 0;
        }
    }

    /* blocked accumulation: C[i][j] += A[i][l] * B[l][j] */
    for (int l_block = 0; l_block < M; l_block = l_block + BS1)
    {
        for (int j_block = 0; j_block < K; j_block = j_block + BS2)
        {
            for (int i = 0; i < N; i++)
            {
                int l_limit = l_block + BS1;
                if (l_limit > M) l_limit = M;
                for (int l = l_block; l < l_limit; l++)
                {
                    int j_limit = j_block + BS2;
                    if (j_limit > K) j_limit = K;
                    for (int j = j_block; j < j_limit; j++)
                    {
                        C[K * i + j] = C[K * i + j] + (A[M * i + l] * B[K * l + j]);
                    }
                }
            }
        }
    }
}
/*
 * matrix_mult_call_specialization: C = A * B (row-major), identical to
 * matrix_mult but with the array extents spelled out in the signature for
 * the specializing tool. C is zeroed, then accumulated block by block.
 */
void matrix_mult_call_specialization(double A[131072], double B[131072], double C[262144], int const N, int const M, int const K)
{
    enum { DEPTH_BS = 64, COL_BS = 64 };  /* block sizes for l and j */

    /* clear the N x K output in one pass */
    for (int ii = 0; ii < N; ii++)
    {
        for (int jj = 0; jj < K; jj++)
        {
            C[K * ii + jj] = 0;
        }
    }

    /* accumulate C[i][j] += A[i][l] * B[l][j] over depth/column blocks */
    for (int lb = 0; lb < M; lb += DEPTH_BS)
    {
        for (int jb = 0; jb < K; jb += COL_BS)
        {
            for (int i = 0; i < N; i++)
            {
                int lend = lb + DEPTH_BS;
                if (lend > M)
                {
                    lend = M;
                }
                for (int l = lb; l < lend; l++)
                {
                    int jend = jb + COL_BS;
                    if (jend > K)
                    {
                        jend = K;
                    }
                    for (int j = jb; j < jend; j++)
                    {
                        C[K * i + j] += A[M * i + l] * B[K * l + j];
                    }
                }
            }
        }
    }
}
// OpenMP-parallelized variant of matrix_mult_call_specialization, produced by
// an auto-parallelizing tool (Clava); the Clava msgError comments record the
// dependence analysis that justified (or forbade) each pragma. C = A * B,
// row-major, C zeroed then accumulated. Left byte-identical: correctness
// depends on the exact pragma placement and the array-section reduction.
void matrix_mult_call_specialization_0(double A[131072], double B[131072], double C[262144], int const N, int const M, int const K)
{
int BS2 = 64;
int BS1 = 64;
/*************** Clava msgError **************
output C[K * ii + jj]#45 -> C[K * ii + jj]#45 +,* MoZ sameloop IsdependentOuterloop=true IsdependentCurrentloop=true IsdependentInnerloop=true
check file : /deploop#42[matrix_mult_call_specialization_0]Array_C.t
****************************************/
for (int ii = 0; ii < N; ii++)
{
// inner zeroing loop parallelized; C is a firstprivate pointer (the
// pointed-to array itself is shared)
#pragma omp parallel for default(shared) firstprivate(C, K, ii)
// deploop#43[matrix_mult_call_specialization_0]Array_C.t
for (int jj = 0; jj < K; jj++)
{
//C[i][j] = 0;
C[K * ii + jj] = 0;
}
}
// outer block loop parallelized with an OpenMP array-section reduction over
// the whole 262144-element C (each thread accumulates a private copy)
#pragma omp parallel for default(shared) firstprivate(A, B, M, BS1, K, BS2, N) reduction (+:C[:262144])
// deploop#48[matrix_mult_call_specialization_0]Array_A_B_C.t
for (int l_block = 0; l_block < M; l_block = l_block + (BS1))
{
//#pragma omp parallel for default(shared) firstprivate(A, B, K, BS2, N, l_block, BS1, M) reduction (+:C[:262144])
// deploop#49[matrix_mult_call_specialization_0]Array_A_B_C.t
for (int j_block = 0; j_block < K; j_block = j_block + (BS2))
{
/*************** Clava msgError **************
anti C[K * i + j]#58 -> C[K * i + j]#58 0,+,* MoZ sameloop IsdependentOuterloop=true IsdependentCurrentloop=true IsdependentInnerloop=true
anti C[K * i + j]#58 -> C[K * i + j]#58 +,*,* MoZ sameloop IsdependentOuterloop=true IsdependentCurrentloop=true IsdependentInnerloop=true
flow C[K * i + j]#58 -> C[K * i + j]#58 0,+,* MoZ sameloop IsdependentOuterloop=true IsdependentCurrentloop=true IsdependentInnerloop=true
flow C[K * i + j]#58 -> C[K * i + j]#58 +,*,* MoZ sameloop IsdependentOuterloop=true IsdependentCurrentloop=true IsdependentInnerloop=true
output C[K * i + j]#58 -> C[K * i + j]#58 +,*,* MoZ sameloop IsdependentOuterloop=true IsdependentCurrentloop=true IsdependentInnerloop=true
check file : /deploop#50[matrix_mult_call_specialization_0]Array_A_B_C.t
****************************************/
for (int i = 0; i < N; i++)
{
int l_limit = l_block + BS1;
if (l_limit > M) l_limit = M;
//#pragma omp parallel for default(shared) firstprivate(A, B, l_block, l_limit, j_block, BS2, K, M, i) reduction (+:C[:262144])
// deploop#53[matrix_mult_call_specialization_0]Array_A_B_C.t
for (int l = l_block; l < l_limit; l++)
{
int j_limit = j_block + BS2;
if (j_limit > K) j_limit = K;
//#pragma omp parallel for default(shared) firstprivate(A, B, C, j_block, j_limit, M, i, K, l)
// deploop#56[matrix_mult_call_specialization_0]Array_A_B_C.t
for (int j = j_block; j < j_limit; j++)
{
//C[i][j] += A[i][l]*B[l][j];
C[K * i + j] = C[K * i + j] + (A[M * i + l] * B[K * l + j]);
}
}
}
}
}
}
/*
 * init_matrix: fill the N-by-M row-major matrix A with pseudo-random
 * values in [0, 1].
 *
 * Fix: the divisor was hard-coded to 32767 while the commented original
 * (and the intent of "random values") is rand()/RAND_MAX. On platforms
 * where RAND_MAX is larger than 32767 (e.g. glibc: 2147483647) the old
 * code produced values far outside [0, 1].
 */
void init_matrix(double * A, int const N, int const M)
{
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < M; ++j)
        {
            //A[i][j] = ((double) rand()) / (double) RAND_MAX;
            A[M * i + j] = ((double) rand()) / (double) RAND_MAX;
        }
    }
}
/*
 * print_matrix_result: sum every entry of the N-by-K row-major matrix A
 * and print the total as a quick checksum of a multiplication result.
 */
void print_matrix_result(double * A, int const N, int const K)
{
    double total = 0.0;
    for (int r = 0; r < N; ++r)
    {
        for (int c = 0; c < K; ++c)
        {
            /* same row-major traversal (and thus the same floating-point
               summation order) as before */
            total += A[K * r + c];
        }
    }
    printf("Result acc: %f\n", total);
}
/*
 * test_matrix_mul: allocate the operands, compute C = A*B with both the
 * serial and the OpenMP specialization, and print a checksum of each result
 * so they can be compared by eye.
 *
 * Fixes: C_OMP was malloc'd but never freed (memory leak), and the four
 * allocations were used without a NULL check.
 */
void test_matrix_mul()
{
    int N = 512;
    int M = 256;
    int K = 512;
    //double A[N][M];
    //double B[M][K];
    //double C[N][K];
    double * A = (double *) malloc(N * M * sizeof(double));
    double * B = (double *) malloc(M * K * sizeof(double));
    double * C = (double *) malloc(N * K * sizeof(double));
    double * C_OMP = (double *) malloc(N * K * sizeof(double));
    if (A == NULL || B == NULL || C == NULL || C_OMP == NULL)
    {
        fprintf(stderr, "test_matrix_mul: allocation failure\n");
        free(A);
        free(B);
        free(C);
        free(C_OMP);
        return;
    }
    // initialize matrices
    init_matrix(A, N, M);
    init_matrix(B, M, K);
    // do: C = A*B
    matrix_mult_call_specialization(A, B, C, N, M, K);
    matrix_mult_call_specialization_0(A, B, C_OMP, N, M, K);
    print_matrix_result(C, N, K);
    print_matrix_result(C_OMP, N, K);
    free(A);
    free(B);
    free(C);
    free(C_OMP);   /* was leaked before */
}
// Entry point: seed the PRNG deterministically, then run the matrix
// multiplication comparison test.
int main()
{
// To make results repeatable
srand(0);
test_matrix_mul();
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* fact: iterative integer factorial n! (unchecked: wraps for large n). */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  for (size_t k = 2; k <= n; k++)
  {
    result *= k;
  }
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Return the final kernel in a singly-linked list of kernels. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *tail;
  for (tail=kernel; tail->next != (KernelInfo *) NULL; tail=tail->next) ;
  return(tail);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/*
** ParseKernelArray parses a single user-defined kernel specification --
** either the new "WxH[+X+Y]:v,v,..." form or the old bare list of numbers
** forming an odd-sized square kernel -- into a freshly allocated KernelInfo.
** Returns NULL on any parse or allocation failure (partially built kernels
** are released via DestroyKernelInfo). Values "nan" or "-" mark holes in
** the kernel shape.
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MagickPathExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
/* each value updates min/max and the signed range accumulators */
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
(void) GetNextToken(p,&p,MagickPathExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling a incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
/*
** ParseKernelName parses a 'named' built-in kernel specification such as
** "Gaussian:0x2" -- a kernel-type name followed by geometry arguments --
** fills in per-type defaults for missing arguments, builds the kernel via
** AcquireKernelBuiltIn(), and optionally expands it into a rotated kernel
** list according to the '@', '>' or '<' geometry flags. Returns NULL when
** the leading token is not a recognized kernel name.
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
(void) GetNextToken(kernel_string,&p,MagickPathExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
/* skip separators between the name and its arguments */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
/*
** AcquireKernelInfo converts a user-supplied kernel string (possibly a
** semicolon-separated list of kernels, or "@filename" to read the list from
** a file) into a linked list of KernelInfo structures, dispatching each
** item to ParseKernelName (alphabetic start) or ParseKernelArray.
** Returns NULL on any failure, destroying kernels built so far.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
ExceptionInfo *exception)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MagickPathExtent];
const char
*p;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
/* "@file" form: read the whole kernel list from the named file */
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
/* tokens starting with alpha is a Named kernel */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p,exception);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo *args,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernels. These 9
% kernels should not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we cannot yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /* Detach the pointers shared by the structure copy above.  If the
  ** allocation below fails we hand new_kernel to DestroyKernelInfo(),
  ** which would otherwise free the values array and recursively destroy
  ** the 'next' kernel chain still owned by the original kernel list.
  */
  new_kernel->values=(MagickRealType *) NULL;
  new_kernel->next=(KernelInfo *) NULL;

  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];

  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }

  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Walk the kernel list iteratively, releasing each node's value array
  ** and then the node itself.  Always returns NULL so callers can write
  ** kernel=DestroyKernelInfo(kernel); to clear their pointer.
  */
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next_kernel;

    next_kernel=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    (void) RelinquishMagickMemory(kernel);
    kernel=next_kernel;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180-degree
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* Disabled (dead code): this helper is excluded from compilation by the
** surrounding #if 0 and is referenced nowhere else in this view.
** NOTE(review): as written it would not compile — `angle` on the final
** line is not declared in this function and KernelInfo's angle member (if
** any) is not visible here; presumably a stored kernel angle was meant to
** be updated.  Confirm before ever enabling this block.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;

      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x],  k[x]=k[r],  k[r]=t;

      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Rotation angles that, applied in sequence (each to a clone of the
  ** previously appended kernel), generate the mirrored expansion:
  ** 180 (flip), then 90 (transpose), then 180 again (flop).
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };

  KernelInfo
    *last;

  ssize_t
    j;

  last=kernel;
  for (j=0; j < 3; j++)
  {
    KernelInfo
      *clone;

    clone=CloneKernelInfo(last);
    if (clone == (KernelInfo *) NULL)
      return;  /* allocation failure: keep whatever was appended so far */
    RotateKernelInfo(clone,angles[j]);
    LastKernelInfo(last)->next=clone;
    last=clone;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels,
% while 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/*
  SameKernelInfo() -- internal routine returning MagickTrue when the two
  kernels have identical geometry, origin, and (NaN-aware) values.  Used to
  detect when incremental rotation has cycled back to the starting kernel.
*/
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    n;

  /* Any geometry or origin mismatch means the kernels differ. */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return(MagickFalse);
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    /* A NaN entry must pair with a NaN entry (normalize to 0/1 first). */
    if ((!IsNaN(kernel1->values[n])) != (!IsNaN(kernel2->values[n])))
      return(MagickFalse);
    /* Non-NaN values must agree to within MagickEpsilon; a NaN pair falls
       through here since any comparison with NaN is false. */
    if (fabs(kernel1->values[n]-kernel2->values[n]) >= MagickEpsilon)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ExpandRotateKernelInfo() expands a kernel list by repeatedly rotating the
  last kernel by 'angle' degrees and appending the result, until a rotation
  reproduces the original kernel (the cycle is complete) or cloning fails.
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  rotated=(KernelInfo *) NULL;
  tail=kernel;
  DisableMSCWarning(4127)
  while (1) {
  RestoreMSCWarning
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* cycled back to the starting kernel */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  /* The final clone either failed or duplicates the original: discard it. */
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values.  This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
/*
  CalcKernelMetaData() recalculates the minimum, maximum, and the negative
  and positive value ranges of the kernel from its values.  Values smaller
  in magnitude than MagickEpsilon are flushed to exactly zero as a side
  effect.  Note: minimum/maximum implicitly include zero, since the
  accumulators start at 0.0.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  double
    value;

  kernel->minimum=0.0;
  kernel->maximum=0.0;
  kernel->negative_range=0.0;
  kernel->positive_range=0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    /* Flush near-zero values to exactly zero. */
    if (fabs(kernel->values[n]) < MagickEpsilon)
      kernel->values[n]=0.0;
    value=kernel->values[n];
    if (value < 0)
      kernel->negative_range+=value;
    else
      kernel->positive_range+=value;
    Minimize(kernel->minimum,value);
    Maximize(kernel->maximum,value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls.  This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine.  The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies a single low-level morphology primitive
  (convolve, erode, dilate, hit-and-miss, thinning/thicken, iterative
  distance, intensity erode/dilate) defined by 'kernel' to 'image', writing
  the result into the pre-allocated 'morphology_image'.  'bias' is the
  convolution output bias.  Returns the number of pixels changed on success.

  NOTE(review): on cache failure the special vertical-kernel path below
  returns 0, while the general path at the end returns -1 -- confirm whether
  callers depend on this asymmetry.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag  "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  register ssize_t
    j,
    y;

  size_t
    *changes,
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  /* Virtual row width: image columns plus the kernel's horizontal overhang. */
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to be used with reflection about its origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      /* Kernel applied un-reflected. */
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      /* Deliberately trips the assert: not a primitive morphology method. */
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  changed=0;
  /* Per-thread change counters, summed after the parallel region. */
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels.  This
        performs its handling in columns rather than in rows.  This is only
        done for convolve as it is the only method that generates very large
        1-D vertical kernels (such as a 'BlurKernel').
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* Read one column, tall enough to cover the kernel overhang. */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* Quantum offset of the origin pixel within the column buffer. */
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                /* Channel is copied verbatim, not filtered. */
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* Walk the 1-D kernel in reflected (reverse) order. */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=1.0;
            count=0;
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              /* No alpha blending: plain weighted sum, skipping NaN weights. */
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              {
                /* Alpha blending: weight each sample by its alpha, then
                   normalize by the accumulated alpha-weight (gamma). */
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  if (!IsNaN(*k))
                    {
                      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                      pixel+=alpha*(*k)*pixels[i];
                      gamma+=alpha*(*k);
                      count++;
                    }
                  k--;
                  pixels+=GetPixelChannels(image);
                }
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            /* NOTE(review): progress total is image->rows although this loop
               iterates over columns -- confirm against upstream. */
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      /* NOTE(review): failure yields 0 here but -1 on the general path. */
      return(status ? (ssize_t) changed : 0);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Quantum offset of the origin pixel within the p buffer. */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels,
          *magick_restrict quantum_pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            /* Channel is copied verbatim, not filtered. */
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        quantum_pixels=(const Quantum *) NULL;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* Seed the accumulator with the method's identity value. */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted average of pixels using the reflected kernel.
              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values need to be reversed.
              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' than Convolution.  However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, it is Convolution that is implemented.
              Correlation will have its kernel reflected before calling this
              function to do a Convolve.
              For more details of Correlation vs Convolution see
                http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  /* Advance to the start of the next kernel row in p. */
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.
              The kernel is not reflected for this operation.  In normal
              Greyscale Morphology, the kernel value should be added
              to the real value; this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.
              For correct working of this operation for asymmetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values need to be reversed.
              In normal Greyscale Morphology, the kernel value should be
              added to the real value; this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixels minus maximum of background pixels.
              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground, with either NaN or 0.5
              values for don't care.
              This never produces a meaningless negative result.  Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground: track the minimum */
                        if ((double) pixels[i] < pixel)
                          pixel=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background: track the maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            pixel-=maximum;
            if (pixel < 0.0)
              pixel=0.0;
            if (method ==  ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method ==  ThickenMorphology)
                /* NOTE(review): thicken accumulates 'pixel' twice
                   (p + 2*pixel) while thinning uses p - pixel; this is what
                   the code does -- confirm intent against upstream. */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select the whole pixel with minimum intensity within the kernel
              neighbourhood.  The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        /* remember the winning source pixel so all channels
                           come from the same pixel */
                        quantum_pixels=pixels;
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select the whole pixel with maximum intensity within the kernel
              neighbourhood.  The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        quantum_pixels=pixels;
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute the iterative distance from the black edge of a white
              image shape.  Essentially white values are decreased to the
              smallest 'distance from edge' it can find.
              It works by adding kernel values to the neighbourhood, and
              selecting the minimum value found.  The kernel is rotated
              before use, so kernel distances match resulting distances when
              a user-provided asymmetric kernel is applied.
              This code is nearly identical to true GrayScale Morphology, but
              not quite:
                GrayDilate: kernel values added, maximum value found; kernel
                is rotated before use.
                GrayErode: kernel values subtracted and minimum value found;
                no kernel rotation used.
              Note the Iterative Distance method is essentially a GrayErode,
              but with negative kernel values and kernel rotation applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        if (quantum_pixels != (const Quantum *) NULL)
          {
            /* Intensity methods copy the chosen pixel's channel directly. */
            SetPixelChannel(morphology_image,channel,quantum_pixels[i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
This is almost identical to the MorphologyPrimitive() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is after each row is 'Sync'ed' into the image, the next row makes use of
those values as part of the calculation of the next row. It repeats, but
going in the opposite (bottom-up) direction.
Because of this 're-use of results' this function can not make use of multi-
threaded, parallel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a Distance or Voronoi primitive
  directly to 'image' in place, using two passes (top-down then bottom-up)
  so each row re-uses the results already written to earlier rows.  Because
  of this dependency the function is strictly single-threaded.  Returns the
  number of pixels changed, or -1 on cache failure.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into the same image; do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  /* Virtual row width: image columns plus the kernel's horizontal overhang. */
  width=image->columns+kernel->width-1;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We
      read using virtual to get virtual pixel handling, but write back into
      the same image.
      Only the top half of the kernel is processed as we do a single pass
      downward through the image, iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Rows of the kernel above (and including) the origin row. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Already-updated pixels to the left, on the current row (read
               from q, the authentic row being written). */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* NOTE(review): unlike Distance above this loop uses
               'v < offset.y' (excludes the origin row) -- confirm the
               asymmetry against upstream. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* Total is 2*rows: this is the first of two passes. */
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse (bottom-up, right-to-left) pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back into
      the same image.
      Only the bottom half of the kernel is processed as we move up the
      image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start at the right edge and scan leftward. */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Rows of the kernel at and below the origin row. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Already-updated pixels to the right, on the current row.
               NOTE(review): this 'k' seed differs from the Voronoi case
               below (kernel->width*kernel->y+kernel->x-1 vs
               kernel->width*(kernel->y+1)-1) -- confirm against upstream. */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
are based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well, almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;  /* saved but currently unused; silence the compiler */
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;

      /* Direct methods modify rslt_image in place; negative return = error */
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  ** erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1: iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2: iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;  /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;  /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          /* never write back into the caller's input image */
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /* Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling: re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup a lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CompositeOperator
    compose;

  const char
    *setting;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *kernel_info;

  /*
    Apply the requested morphology method after honouring any user
    artifacts (convolve:bias, convolve:scale, morphology:showKernel,
    morphology:compose) attached to the image.
  */
  kernel_info=(KernelInfo *) kernel;
  bias=0.0;
  compose=UndefinedCompositeOp;  /* use default for method */
  /*
    Apply Convolve/Correlate Normalization and Scaling Factors.  This is
    done BEFORE ShowKernelInfo() below so that users can see the results
    of the 'option:convolve:scale' option.
  */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* Get the bias value as it will be needed */
      setting=GetImageArtifact(image,"convolve:bias");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:bias",setting);
          else
            bias=StringToDoubleInterval(setting,(double) QuantumRange+1.0);
        }
      /* Scale kernel according to user wishes */
      setting=GetImageArtifact(image,"convolve:scale");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:scale",setting);
          else
            {
              /* clone before modifying the caller's (const) kernel */
              if (kernel_info == kernel)
                kernel_info=CloneKernelInfo(kernel);
              if (kernel_info == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(kernel_info,setting);
            }
        }
    }
  /* display the (normalized) kernel via stderr */
  setting=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(setting) != MagickFalse)
    ShowKernelInfo(kernel_info);
  /*
    Override the default handling of multi-kernel morphology results.
      If 'Undefined' use the default method.
      If 'None' (default for 'Convolve') re-iterate the previous result.
      Otherwise merge resulting images using the compose method given.
    Default for 'HitAndMiss' is 'Lighten'.
  */
  setting=GetImageArtifact(image,"morphology:compose");
  if (setting != (const char *) NULL)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickComposeOptions,MagickFalse,setting);
      if (option < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),
          OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
          "morphology:compose",setting);
      else
        compose=(CompositeOperator) option;
    }
  /* Apply the Morphology */
  morphology_image=MorphologyApply(image,method,iterations,kernel_info,
    compose,bias,exception);
  /* Cleanup and Exit */
  if (kernel_info != kernel)
    kernel_info=DestroyKernelInfo(kernel_info);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first (recurse down the multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle: cycle the 8 ring
          ** values one position; the center value [4] is untouched. */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            /* 45 degree rotation of the origin offset (unit ring walk) */
            if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);    /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees:
          ** 4-way cyclic exchange of mirrored cells, in place. */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0; i < j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;

      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    status;

  /*
    Parse the user's "convolve:scale" geometry: rho is the scaling
    factor (with optional normalization flags), sigma is the amount of
    unity kernel to blend in afterwards.
  */
  SetGeometryInfo(&geometry_info);
  status=ParseGeometry(geometry,&geometry_info);
  if ((status & PercentValue) != 0)   /* Handle Percentage flag */
    {
      geometry_info.rho*=0.01;
      geometry_info.sigma*=0.01;
    }
  if ((status & RhoValue) == 0)       /* Set Defaults for missing args */
    geometry_info.rho=1.0;
  if ((status & SigmaValue) == 0)
    geometry_info.sigma=0.0;
  /* Scale/Normalize the input kernel */
  ScaleKernelInfo(kernel,geometry_info.rho,(GeometryFlags) status);
  /* Add Unity Kernel, for blending with original */
  if ((status & SigmaValue) != 0)
    UnityAddKernelInfo(kernel,geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive part only */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* scale each value; NaN entries ('nan' kernel cells) are left alone */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    /* BUG FIX: completing the swap requires the saved maximum ('t'),
    ** not the constant 1, otherwise the recorded minimum is corrupted. */
    kernel->minimum = t;
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *node;

  size_t
    index;

  /*
    Write a human-readable dump of every kernel in the multi-kernel
    list to standard error: type, size, origin, value ranges, and the
    full grid of values.
  */
  index=0;
  for (node=kernel; node != (KernelInfo *) NULL; node=node->next)
  {
    size_t
      cell,
      col,
      row;

    (void) FormatLocaleFile(stderr, "Kernel");
    /* only number the kernels when there is more than one in the list */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) index );
    (void) FormatLocaleFile(stderr, " \"%s",
      CommandOptionToMnemonic(MagickKernelOptions, node->type) );
    if ( fabs(node->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", node->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
      node->width,(unsigned long) node->height,(long) node->x,(long) node->y);
    (void) FormatLocaleFile(stderr,
      " with values from %.*lg to %.*lg\n",
      GetMagickPrecision(), node->minimum,
      GetMagickPrecision(), node->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
      GetMagickPrecision(), node->negative_range,
      GetMagickPrecision(), node->positive_range);
    if ( fabs(node->positive_range+node->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(node->positive_range+node->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
        GetMagickPrecision(), node->positive_range+node->negative_range);
    /* dump the kernel grid, one row per line; NaN cells print as "nan" */
    cell=0;
    for (row=0; row < node->height; row++)
    {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (col=0; col < node->width; col++, cell++)
        if (IsNaN(node->values[cell]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
            GetMagickPrecision(), (double) node->values[cell]);
      (void) FormatLocaleFile(stderr,"\n");
    }
    index++;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
/*
  Add 'scale' times the Unity convolution kernel to every kernel in the
  (possibly multi-kernel) list, then refresh each kernel's meta-data.
  In effect this blends that amount of the original image back into the
  convolution result.
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
KernelInfo
*k;
/* iterate over the whole multi-kernel list */
for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
{
/* bump the kernel's origin element by the requested amount */
k->values[k->x+k->y*k->width] += scale;
CalcKernelMetaData(k); /* recalculate ranges and extrema */
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/*
  Replace every special 'nan' value with zero, in each kernel of the
  (possibly multi-kernel) list.
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
KernelInfo
*k;
size_t
i, count;
/* iterate over the whole multi-kernel list */
for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
{
count=k->width*k->height;
for (i=0; i < count; i++)
if (IsNaN(k->values[i]))
k->values[i]=0.0;
}
return;
}
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "npb-C.h"
#include "npbparams.h"
#include "openacc.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
#ifdef _OPENARC_
#pragma openarc #define NZ \NA*(\NONZER+1)*(\NONZER+1)+\NA*(\NONZER+2)
#endif
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
//static int colidx[NZ+1]; /* colidx[1:NZ] */
//static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
static int *colidx;
static int *rowstr;
/* common /main_flt_mem/ */
static float v[NA+1+1]; /* v[1:NA+1] */
static float aelt[NZ+1]; /* aelt[1:NZ] */
//static float a[NZ+1]; /* a[1:NZ] */
//static float x[NA+2+1]; /* x[1:NA+2] */
//static float z[NA+2+1]; /* z[1:NA+2] */
//static float p[NA+2+1]; /* p[1:NA+2] */
//static float q[NA+2+1]; /* q[1:NA+2] */
//static float r[NA+2+1]; /* r[1:NA+2] */
//static float w[NA+2+1]; /* w[1:NA+2] */
static float *a;
static float *x;
static float *z;
static float *p;
static float *q;
static float *r;
static float *w;
/* common /urando/ */
static float amult;
static float tran;
// Static variables used in conj_grad().
static float d, sum, rho, rho0, alpha, beta;
/* function declarations */
static void conj_grad (int colidx[NZ+1], int rowstr[NA+1+1], float x[NA+2+1], float z[NA+2+1],
float a[NZ+1], float p[NA+2+1], float q[NA+2+1], float r[NA+2+1],
float w[NA+2+1], float *rnorm);
static void makea(int n, int nz, float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, float rcond, int arow[NZ+1], int acol[NZ+1],
float aelt[NZ+1], float v[NA+1+1], int iv[2*NA+1+1], float shift );
static void sparse(float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1], int n,
int arow[NZ+1], int acol[NZ+1], float aelt[NZ+1],
int firstrow, int lastrow,
float x[NA+1+1], boolean mark[NA+1], int nzloc[NA+1], int nnza);
static void sprnvc(int n, int nz, float v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(float x, int ipwr2);
static void vecset(int n, float v[], int iv[], int *nzv, int i, float val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
/*
 * Driver for the NPB CG benchmark (single-precision OpenACC/OpenARC port):
 * builds the random sparse matrix with makea(), runs one untimed, fully
 * inlined conj_grad pass to initialize code and data page tables, then
 * times NITER inverse-power-method iterations and verifies the computed
 * zeta against the per-class reference value.
 */
int main(int argc, char **argv) {
int i_main, j_main, k_main, it;
int nthreads = 1;
float zeta;
float rnorm;
float norm_temp11;
float norm_temp12;
float t, mflops;
char classT = 'U';
boolean verified;
float zeta_verify_value, epsilon;
////////////////////////////////////
// Used for inlining conj_grad(). //
////////////////////////////////////
int i, j, k;
int cgit, cgitmax = 25;
firstrow = 1;
lastrow = NA;
firstcol = 1;
lastcol = NA;
/* Select the benchmark class (and its single-precision verification zeta)
from the compile-time problem parameters in npbparams.h. */
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
classT = 'S';
// zeta_verify_value = 8.5971775078648;
zeta_verify_value = 8.379274368286; //serial version value with Single Precision
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
classT = 'W';
// zeta_verify_value = 10.362595087124;
zeta_verify_value = 10.11725139618; //serial version value with Single Precision
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
classT = 'A';
// zeta_verify_value = 17.130235054029;
zeta_verify_value = 18.62915039062; //serial version value with Single Precision
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
classT = 'B';
// zeta_verify_value = 22.712745482631;
zeta_verify_value = 62.42129135132; //serial version value with Single Precision
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
classT = 'C';
// zeta_verify_value = 28.973605592845;
zeta_verify_value = 115.1209869385; //serial version value with Single Precision
} else {
classT = 'U';
}
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - CG Benchmark\n");
printf(" Size: %10d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
timer_clear(2);
timer_clear(3);
timer_clear(4);
timer_start(2);
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
// Initial numbers are changed for single precision
// tran = 314159265.0;
// amult = 1220703125.0;
tran = 28183.0f;
amult = 390625.0f;
zeta = randlc( &tran, amult );
// Allocate the main data structures.
/*
colidx = (int *)malloc(sizeof(int)*(NZ+1));
rowstr = (int *)malloc(sizeof(int)*(NA+1+1));
a = (float *)malloc(sizeof(float)*(NZ+1));
x = (float *)malloc(sizeof(float)*(NA+2+1));
z = (float *)malloc(sizeof(float)*(NA+2+1));
p = (float *)malloc(sizeof(float)*(NA+2+1));
q = (float *)malloc(sizeof(float)*(NA+2+1));
r = (float *)malloc(sizeof(float)*(NA+2+1));
w = (float *)malloc(sizeof(float)*(NA+2+1));
*/
/* NOTE(review): acc_create_unified() is an OpenARC unified-memory
extension (host and device share these buffers) -- confirm availability
when building with other OpenACC compilers. Return values are unchecked. */
colidx = (int *)acc_create_unified(NULL, sizeof(int)*(NZ+1));
rowstr = (int *)acc_create_unified(NULL, sizeof(int)*(NA+1+1));
a = (float *)acc_create_unified(NULL, sizeof(float)*(NZ+1));
x = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
z = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
p = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
q = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
r = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
w = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
timer_start(4);
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
timer_stop(4);
timer_start(3);
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma acc data \
create(x[0:NA+3]) \
create(z[0:NA+3]) \
create(p[0:NA+3]) \
create(q[0:NA+3]) \
create(r[0:NA+3]) \
create(w[0:NA+3]) \
copyin(a[0:NZ+1]) \
copyin(colidx[0:NZ+1]) \
copyin(rowstr[0:NA+2])
{
timer_stop(3);
// R/O Shared scalar: lastrow, firstrow, firstcol
// R/O Shared arrays: rowstr[NA+1+1]
// R/W Shared arrays: colidx[NZ+1]
// R/W Private scalar: j_main, k_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastrow - firstrow + 1; j_main++) {
for (k_main = rowstr[j_main]; k_main < rowstr[j_main+1]; k_main++) {
colidx[k_main] = colidx[k_main] - firstcol + 1;
}
}
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalar: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalar: zeta
zeta = 0.0f;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
for (it = 1; it <= 1; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
/* NOTE(review): rho (and later d, sum, norm_temp11/12) is accumulated by
many workers without an explicit reduction clause; correctness relies on
the OpenACC compiler recognizing this as a reduction -- confirm for the
target compiler. */
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/w Shared scalars: norm_temp12
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalars: zeta
zeta = 0.0f;
// } /* end parallel */
timer_clear( 1 );
timer_start( 1 );
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
//#pragma omp parallel private(it,i_main,j_main,k_main)
// {
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/O Shared scalars: norm_temp11
// R/W Shared scalars: norm_temp12, zeta
{
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
zeta = SHIFT + 1.0f / norm_temp11;
} /* end single */
{
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
} /* end master */
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of main iter inv pow meth */
/* NOTE(review): omp_get_num_threads() outside an OpenMP parallel region
returns 1, so nthreads stays at its initial value here. */
#if defined(_OPENMP)
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop( 1 );
timer_stop( 2 );
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
printf(" Benchmark completed\n");
//epsilon = 1.0e-10;
//New value for single precision
epsilon = 1.0e-6;
/* Verify against the class reference zeta (single-precision tolerance). */
if (classT != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.12e\n", zeta);
printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.12e\n", zeta);
printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
/* Flop-rate model from the NPB CG specification. */
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", classT, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
printf("makea() execution time = %12.4f\n", timer_read(4));
printf("CUDA Initialization time = %12.4f\n", timer_read(3));
printf("Total execution time = %12.4f\n", timer_read(2));
return 0;
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
/*
 * Generate the CG test matrix as a list of (arow, acol, aelt) triples,
 * then convert it to CSR form (a, colidx, rowstr) via sparse().
 * Each outer iteration builds a random sparse vector, forms its scaled
 * outer product, and appends the entries that fall inside this rank's
 * (firstrow..lastrow) x (firstcol..lastcol) window; rcond*I - shift*I
 * is added on the diagonal to bound the smallest eigenvalue.
 * Aborts via exit(1) if more than nz triples are produced.
 */
static void makea(
    int n,
    int nz,
    float a[NZ+1],          /* a[1:nz] */
    int colidx[NZ+1],       /* colidx[1:nz] */
    int rowstr[NA+1+1],     /* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    float rcond,
    int arow[NZ+1],         /* arow[1:nz] */
    int acol[NZ+1],         /* acol[1:nz] */
    float aelt[NZ+1],       /* aelt[1:nz] */
    float v[NA+1+1],        /* v[1:n+1] */
    int iv[2*NA+1+1],       /* iv[1:2*n+1] */
    float shift )
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
/*--------------------------------------------------------------------
c      nonzer is approximately  (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
    float size, ratio, scale;
    int jcol;
    size = 1.0f;
    /* geometric decay so that the last outer-product block is scaled
       by roughly rcond (condition-number control) */
    ratio = pow(rcond, (1.0f / (float)n));
    nnza = 0;
/*---------------------------------------------------------------------
c  Initialize colidx(n+1 .. 2n) to zero.
c  Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
    // R/O Shared scalars: n
    // R/W Shared arrays: colidx[NZ+1]
    // R/W Private scalars: i
#pragma acc kernels loop gang worker pcopyout(colidx)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        /* random sparse vector; colidx[n+1..2n] serves as the mark array */
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        /* force a 0.5 entry at position iouter (diagonal dominance) */
        vecset(n, v, iv, &nzv, iouter, 0.5);
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            printf("Space for matrix elements exceeded in"
                                   " makea\n");
                            printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        /* record the outer-product contribution as a triple */
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }
/*---------------------------------------------------------------------
c       ... add the identity * rcond to the generated matrix to bound
c           the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                printf("Space for matrix elements exceeded in makea\n");
                printf("nnza, nzmax = %d, %d\n", nnza, nz);
                printf("iouter = %d\n", iouter);
                exit(1);
            }
            /* diagonal entry: rcond - shift */
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;
        }
    }
/*---------------------------------------------------------------------
c       ... make the sparse matrix from list of elements with duplicates
c           (v and iv are used as workspace)
c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
/*
 * Convert the triple list (arow, acol, aelt)[1..nnza] into a CSR matrix
 * (a, colidx, rowstr), summing duplicate (row, col) entries.
 * x and mark are per-column workspace; nzloc records which columns of
 * the current row became nonzero, so only those are reset afterwards.
 * Statement order here is load-bearing: rowstr is used first as a
 * counter, then as an insertion cursor, and finally shifted back to
 * the conventional row-pointer form.
 */
static void sparse(
    float a[NZ+1],          /* a[1:*] */
    int colidx[NZ+1],       /* colidx[1:*] */
    int rowstr[NA+1+1],     /* rowstr[1:*] */
    int n,
    int arow[NZ+1],         /* arow[1:*] */
    int acol[NZ+1],         /* acol[1:*] */
    float aelt[NZ+1],       /* aelt[1:*] */
    int firstrow,
    int lastrow,
    float x[NA+1+1],        /* x[1:n] */
    boolean mark[NA+1],     /* mark[1:n] */
    int nzloc[NA+1],        /* nzloc[1:n] */
    int nnza)
/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    float xi;
/*--------------------------------------------------------------------
c    how many rows of result
c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c     ...count the number of triples in each row
c-------------------------------------------------------------------*/
    // R/O Shared scalars: n
    // R/W Shared arrays: rowstr[NA+1+1], mark[n]
    // R/W Private scalars: j
#pragma acc kernels loop gang worker independent \
 pcopyout(rowstr[0:NA+1+1]) create(mark[0:NA+1])
    for (j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;
    /* histogram: rowstr[r+1] counts triples whose row index is r */
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }
    /* prefix sum turns counts into row start positions (1-based) */
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }
/*---------------------------------------------------------------------
c     ... rowstr(j) now is the location of the first nonzero
c           of row j of a
c---------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c     ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];          /* insertion cursor for row j */
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;
    }
/*--------------------------------------------------------------------
c       ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
    /* shift the cursors back down so rowstr[j] is the start of row j */
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;
/*--------------------------------------------------------------------
c       ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
    nza = 0;
    // R/O Shared scalars: n
    // R/W Shared arrays: x[NA+2+1], mark[n]
    // R/W Private scalars: i
#pragma acc kernels loop gang worker pcopyout(x, mark)
    for (i = 1; i <= n; i++) {
        x[i] = 0.0f;
        mark[i] = FALSE;
    }
    jajp1 = rowstr[1];
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;              /* distinct nonzero columns seen in row j */
/*--------------------------------------------------------------------
c          ...loop over the jth row of a
c-------------------------------------------------------------------*/
        /* scatter-add duplicates of row j into the dense workspace x */
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0f) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;
            }
        }
/*--------------------------------------------------------------------
c          ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0f;        /* reset workspace for the next row */
            if (xi != 0.0f) {
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        /* remember old end of row j, then fix up rowstr[j+1] to the
           compacted position */
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];
    }
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
/*
 * Generate a sparse n-vector (v, iv) with nz nonzero entries at random
 * positions drawn from the global randlc() stream (tran, amult).
 *
 * mark[] must be all zero on entry; it is used to reject duplicate
 * positions and is restored to all zero before returning (only the
 * touched entries, recorded in nzloc[], are cleared -- this avoids an
 * O(n) sweep on every call).
 */
static void sprnvc(
    int n,
    int nz,
    float v[],          /* v[1:*] */
    int iv[],           /* iv[1:*] */
    int nzloc[],        /* nzloc[1:n] */
    int mark[] )        /* mark[1:n] */
{
    int pow2;
    int marked, filled, pos, idx;
    float val, loc;

    filled = 0;
    marked = 0;
    /* smallest power of two not less than n */
    pow2 = 1;
    do {
        pow2 += pow2;
    } while (pow2 < n);

    while (filled < nz) {
        /* one random value, one random position per attempt */
        val = randlc(&tran, amult);
        loc = randlc(&tran, amult);
        /* map (0,1) onto 1..pow2 in a portable way */
        idx = icnvrt(loc, pow2) + 1;
        if (idx > n) continue;          /* out of range: redraw */
        if (mark[idx] != 0) continue;   /* position already used: redraw */
        mark[idx] = 1;
        marked = marked + 1;
        nzloc[marked] = idx;
        filled = filled + 1;
        v[filled] = val;
        iv[filled] = idx;
    }
    /* clear only the marks we set, leaving mark[] all zero again */
    for (pos = 1; pos <= marked; pos++) {
        mark[nzloc[pos]] = 0;
    }
}
/*---------------------------------------------------------------------
* scale a float precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
/*
 * Scale a float x in (0,1) by the power of two ipwr2 and truncate
 * toward zero, yielding an integer in [0, ipwr2).
 */
static int icnvrt(float x, int ipwr2)
{
    return (int)(x * ipwr2);
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
/*
 * Set the entry of the sparse vector (v, iv) whose index is i to val.
 * All stored occurrences of index i are overwritten; if i is absent,
 * (val, i) is appended and *nzv grows by one.  n is unused (kept for
 * interface compatibility with the Fortran original).
 */
static void vecset(
    int n,
    float v[],          /* v[1:*] */
    int iv[],           /* iv[1:*] */
    int *nzv,
    int i,
    float val)
{
    int pos;
    int found = 0;      /* nonzero once index i has been seen */

    for (pos = 1; pos <= *nzv; pos++) {
        if (iv[pos] == i) {
            v[pos] = val;
            found = 1;
        }
    }
    if (!found) {
        *nzv += 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
fill_int2e.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <math.h>
#include "config.h"
#include "cint.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define MIN(I,J) ((I) < (J) ? (I) : (J))
/*
 * Largest number of AO functions carried by any single shell inside the
 * union of the ncenter shell ranges in shls_slice (pairs of
 * [start, stop) shell indices).  ao_loc[s] is the cumulative AO offset
 * of shell s, so shell s spans ao_loc[s+1]-ao_loc[s] functions.
 */
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter)
{
        int lo = shls_slice[0];
        int hi = shls_slice[1];
        int c, s;
        int width = 0;
        /* widen [lo, hi) to cover every listed range */
        for (c = 1; c < ncenter; c++) {
                if (shls_slice[c*2] < lo) {
                        lo = shls_slice[c*2];
                }
                if (shls_slice[c*2+1] > hi) {
                        hi = shls_slice[c*2+1];
                }
        }
        for (s = lo; s < hi; s++) {
                int d = ao_loc[s+1] - ao_loc[s];
                if (d > width) {
                        width = d;
                }
        }
        return width;
}
/*
 * Upper bound on the scratch ("cache") space, in units of the integral
 * type, needed by intor for any shell quartet drawn from the union of
 * the ncenter shell ranges.  By libcint convention, calling intor with
 * NULL output/cache buffers makes it return its cache requirement for
 * the given quartet instead of computing integrals; probing the
 * diagonal quartet (i,i,i,i) per shell suffices for the bound.
 */
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int lo = shls_slice[0];
        int hi = shls_slice[1];
        int c, sh, need;
        /* widen [lo, hi) to cover every listed shell range */
        for (c = 1; c < ncenter; c++) {
                if (shls_slice[c*2] < lo) {
                        lo = shls_slice[c*2];
                }
                if (shls_slice[c*2+1] > hi) {
                        hi = shls_slice[c*2+1];
                }
        }
        int shls[4];
        int cache_size = 0;
        for (sh = lo; sh < hi; sh++) {
                shls[0] = sh;
                shls[1] = sh;
                shls[2] = sh;
                shls[3] = sh;
                need = (*intor)(NULL, NULL, shls, atm, natm, bas, nbas, env,
                                NULL, NULL);
                if (need > cache_size) {
                        cache_size = need;
                }
        }
        return cache_size;
}
/*
*************************************************
* 2e AO integrals in s4, s2ij, s2kl, s1
*/
/*
 * Fill the (ishp, jshp) strip of the 4-index ERI tensor with no
 * permutation symmetry (s1: all ni*nj*nk*nl values stored per
 * component).  buf receives the raw block from intor in its native
 * layout (i fastest, then j, k, l); the copy loop transposes it into
 * eri's layout, where l is fastest.  Quartets rejected by fprescreen,
 * or for which intor reports an empty block, are explicitly zeroed so
 * eri need not be pre-initialized.
 */
void GTOnr2e_fill_s1(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf, int comp, int ishp, int jshp,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        int lsh1 = shls_slice[7];
        /* number of AO functions spanned by each shell range */
        int ni = ao_loc[ish1] - ao_loc[ish0];
        int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        int nl = ao_loc[lsh1] - ao_loc[lsh0];
        /* NOTE(review): ni*nj and nk*nl are computed in int before
         * widening to size_t -- assumes they fit in int; confirm for
         * very large bases */
        size_t nij = ni * nj;
        size_t nkl = nk * nl;
        size_t neri = nij * nkl;        /* size of one component of eri */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        eri += nkl * (i0 * nj + j0);    /* base of this (ish, jsh) strip */
        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh;
        int shls[4];
        double *eri0, *peri, *buf0, *pbuf, *cache;
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh < lsh1; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                /* intor's scratch area lives directly after the
                 * comp integral blocks inside buf */
                cache = buf + dijkl * comp;
                if ((*fprescreen)(shls, atm, bas, env) &&
                    (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        eri0 = eri + k0*nl+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                /* transpose buf (i fastest) into eri
                                 * (l fastest) */
                                for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        peri = eri0 + nkl*(i*nj+j);
                                        for (k = 0; k < dk; k++) {
                                        for (pbuf = buf0 + k*dij + j*di + i,
                                             l = 0; l < dl; l++) {
                                                peri[k*nl+l] = pbuf[l*dijk];
                                        } }
                                } }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* screened-out quartet: zero-fill the slot */
                        eri0 = eri + k0*nl+l0;
                        for (icomp = 0; icomp < comp; icomp++) {
                                for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        peri = eri0 + nkl*(i*nj+j);
                                        for (k = 0; k < dk; k++) {
                                        for (l = 0; l < dl; l++) {
                                                peri[k*nl+l] = 0;
                                        }
                                        }
                                } }
                                eri0 += neri;
                        }
                }
        } }
}
/*
 * Fill the (ishp, jshp) strip of the ERI tensor with the ij pair packed
 * in lower-triangular form (s2ij: only i >= j stored, nij = ni*(ni+1)/2
 * rows of nkl values each).  Strips above the diagonal (ishp < jshp)
 * are handled by the transposed (jshp, ishp) call, so they return
 * immediately.  On the diagonal shell pair (ishp == jshp) only the
 * j <= i functions are copied.  Screened-out quartets are zero-filled.
 */
void GTOnr2e_fill_s2ij(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf, int comp, int ishp, int jshp,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        if (ishp < jshp) {
                return;
        }
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        //int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        int lsh1 = shls_slice[7];
        int ni = ao_loc[ish1] - ao_loc[ish0];
        //int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * (ni+1) / 2;   /* packed triangular ij count */
        size_t nkl = nk * nl;
        size_t neri = nij * nkl;        /* size of one component */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        /* i0*(i0+1)/2 is the start of packed row i0; j0 offsets into it */
        eri += nkl * (i0*(i0+1)/2 + j0);
        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh;
        int shls[4];
        double *eri0, *peri0, *peri, *buf0, *pbuf, *cache;
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
        for (lsh = lsh0; lsh < lsh1; lsh++) {
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                cache = buf + dijkl * comp;     /* intor scratch follows buf */
                if ((*fprescreen)(shls, atm, bas, env) &&
                    (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        eri0 = eri + k0*nl+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (ishp > jshp) {
                                        /* off-diagonal shell pair: full
                                         * di*dj rectangle is below the
                                         * diagonal */
                                        /* loop increment advances peri0 to
                                         * the start of packed row i0+i */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else {
                                        /* diagonal shell pair: keep only
                                         * j <= i */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = pbuf[l*dijk];
                                                } }
                                        } }
                                }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* screened-out quartet: zero-fill the same span */
                        eri0 = eri + k0*nl+l0;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (ishp > jshp) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = 0;
                                                } }
                                        } }
                                } else {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[k*nl+l] = 0;
                                                } }
                                        } }
                                }
                                eri0 += neri;
                        }
                }
        } }
}
/*
 * Fill the (ishp, jshp) strip of the ERI tensor with the kl pair packed
 * in lower-triangular form (s2kl: only k >= l stored, nkl = nk*(nk+1)/2
 * values per ij pair).  The kl shell loop itself runs only over
 * lshp <= kshp; on diagonal kl shell pairs only the l <= k functions
 * are copied.  Screened-out quartets are zero-filled.
 */
void GTOnr2e_fill_s2kl(int (*intor)(), int (*fprescreen)(),
                       double *eri, double *buf, int comp, int ishp, int jshp,
                       int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        //int lsh1 = shls_slice[7];
        int ni = ao_loc[ish1] - ao_loc[ish0];
        int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        //int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * nj;
        size_t nkl = nk * (nk+1) / 2;   /* packed triangular kl count */
        size_t neri = nij * nkl;        /* size of one component */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        eri += nkl * (i0 * nj + j0);    /* base of this (ish, jsh) strip */
        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh, kshp, lshp;
        int shls[4];
        double *eri0, *peri, *buf0, *pbuf, *cache;
        shls[0] = ish;
        shls[1] = jsh;
        /* triangular kl shell loop: lshp <= kshp */
        for (kshp = 0; kshp < ksh1-ksh0; kshp++) {
        for (lshp = 0; lshp <= kshp; lshp++) {
                ksh = kshp + ksh0;
                lsh = lshp + lsh0;
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                cache = buf + dijkl * comp;     /* intor scratch follows buf */
                if ((*fprescreen)(shls, atm, bas, env) &&
                    (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        /* k0*(k0+1)/2 is the start of packed kl row k0 */
                        eri0 = eri + k0*(k0+1)/2+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                if (kshp > lshp) {
                                        /* off-diagonal kl shells: full
                                         * dk*dl rectangle lies below the
                                         * diagonal */
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                /* increment walks peri to the
                                                 * next packed kl row */
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else {
                                        /* diagonal kl shells: keep only
                                         * l <= k */
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l <= k; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* screened-out quartet: zero-fill the same span */
                        eri0 = eri + k0*(k0+1)/2+l0;
                        for (icomp = 0; icomp < comp; icomp++) {
                                if (kshp > lshp) {
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else {
                                        for (i = 0; i < di; i++) {
                                        for (j = 0; j < dj; j++) {
                                                peri = eri0 + nkl*(i*nj+j);
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l <= k; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                }
                                eri0 += neri;
                        }
                }
        } }
}
/*
 * Fill the (ishp, jshp) strip of the ERI tensor with both the ij and kl
 * pairs packed in lower-triangular form (s4: i >= j and k >= l,
 * nij = ni*(ni+1)/2 rows of nkl = nk*(nk+1)/2 values).  Strips with
 * ishp < jshp belong to the transposed call and return immediately; the
 * kl shell loop runs only over lshp <= kshp.  The four copy variants
 * cover the off-/on-diagonal combinations of the ij and kl shell pairs.
 * Screened-out quartets are zero-filled.
 *
 * NOTE(review): the 2nd/3rd branch conditions test ish > jsh / ksh > lsh
 * (absolute shell ids) while the 1st tests ishp > jshp / kshp > lshp
 * (relative).  These agree only when ish0 == jsh0 and ksh0 == lsh0,
 * which s4 symmetry presumes -- confirm against upstream.
 */
void GTOnr2e_fill_s4(int (*intor)(), int (*fprescreen)(),
                     double *eri, double *buf, int comp, int ishp, int jshp,
                     int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        if (ishp < jshp) {
                return;
        }
        int ish0 = shls_slice[0];
        int ish1 = shls_slice[1];
        int jsh0 = shls_slice[2];
        //int jsh1 = shls_slice[3];
        int ksh0 = shls_slice[4];
        int ksh1 = shls_slice[5];
        int lsh0 = shls_slice[6];
        //int lsh1 = shls_slice[7];
        int ni = ao_loc[ish1] - ao_loc[ish0];
        //int nj = ao_loc[jsh1] - ao_loc[jsh0];
        int nk = ao_loc[ksh1] - ao_loc[ksh0];
        //int nl = ao_loc[lsh1] - ao_loc[lsh0];
        size_t nij = ni * (ni+1) / 2;   /* packed triangular ij count */
        size_t nkl = nk * (nk+1) / 2;   /* packed triangular kl count */
        size_t neri = nij * nkl;        /* size of one component */
        int ish = ishp + ish0;
        int jsh = jshp + jsh0;
        int i0 = ao_loc[ish] - ao_loc[ish0];
        int j0 = ao_loc[jsh] - ao_loc[jsh0];
        /* start of packed ij row i0, offset by j0 */
        eri += nkl * (i0*(i0+1)/2 + j0);
        int di = ao_loc[ish+1] - ao_loc[ish];
        int dj = ao_loc[jsh+1] - ao_loc[jsh];
        int dij = di * dj;
        int k0, l0, dk, dl, dijk, dijkl;
        int i, j, k, l, icomp;
        int ksh, lsh, kshp, lshp;
        int shls[4];
        double *eri0, *peri0, *peri, *buf0, *pbuf, *cache;
        shls[0] = ish;
        shls[1] = jsh;
        /* triangular kl shell loop: lshp <= kshp */
        for (kshp = 0; kshp < ksh1-ksh0; kshp++) {
        for (lshp = 0; lshp <= kshp; lshp++) {
                ksh = kshp + ksh0;
                lsh = lshp + lsh0;
                shls[2] = ksh;
                shls[3] = lsh;
                k0 = ao_loc[ksh] - ao_loc[ksh0];
                l0 = ao_loc[lsh] - ao_loc[lsh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dl = ao_loc[lsh+1] - ao_loc[lsh];
                dijk = dij * dk;
                dijkl = dijk * dl;
                cache = buf + dijkl * comp;     /* intor scratch follows buf */
                if ((*fprescreen)(shls, atm, bas, env) &&
                    (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
                        /* start of packed kl row k0, offset by l0 */
                        eri0 = eri + k0*(k0+1)/2+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (kshp > lshp && ishp > jshp) {
                                        /* both pairs off-diagonal: full
                                         * rectangles in i,j and k,l */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else if (ish > jsh) {
                                        /* ij off-diagonal, kl diagonal:
                                         * keep l <= k */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l <= k; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else if (ksh > lsh) {
                                        /* ij diagonal, kl off-diagonal:
                                         * keep j <= i */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l < dl; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                } else {
                                        /* both pairs diagonal: keep j <= i
                                         * and l <= k */
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (pbuf = buf0 + k*dij + j*di + i,
                                                     l = 0; l <= k; l++) {
                                                        peri[l] = pbuf[l*dijk];
                                                } }
                                        } }
                                }
                                buf0 += dijkl;
                                eri0 += neri;
                        }
                } else {
                        /* screened-out quartet: zero-fill the same span */
                        eri0 = eri + k0*(k0+1)/2+l0;
                        buf0 = buf;
                        for (icomp = 0; icomp < comp; icomp++) {
                                peri0 = eri0;
                                if (kshp > lshp && ishp > jshp) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else if (ish > jsh) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j < dj; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l <= k; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else if (ksh > lsh) {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l < dl; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                } else {
                                        for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) {
                                        for (j = 0; j <= i; j++) {
                                                peri = peri0 + nkl*j;
                                                for (k = 0; k < dk; k++, peri+=k0+k) {
                                                for (l = 0; l <= k; l++) {
                                                        peri[l] = 0;
                                                } }
                                        } }
                                }
                                eri0 += neri;
                        }
                }
        } }
}
/* Default prescreen callback: accept every shell quartet.  Declared
 * with an empty (unprototyped) parameter list so it can be stored in
 * the generic fprescreen pointer, which is invoked with 4 arguments. */
static int no_prescreen()
{
        return 1;
}
/*
 * Driver for the 2e AO integral fill routines: iterates over all
 * (ish, jsh) shell pairs in the bra slice and dispatches each pair to
 * the chosen fill kernel (s1/s2ij/s2kl/s4) under OpenMP.  Each thread
 * gets its own buffer sized for the largest possible integral block
 * (di^4 * comp) plus intor's worst-case cache requirement.
 * A NULL fprescreen defaults to accepting every quartet.
 */
void GTOnr2e_fill_drv(int (*intor)(), void (*fill)(), int (*fprescreen)(),
                      double *eri, int comp,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        if (fprescreen == NULL) {
                fprescreen = no_prescreen;
        }
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        /* largest single-shell AO dimension over all four slices */
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel
{
        int ij, i, j;
        /* per-thread workspace: integral block + intor scratch.
         * NOTE(review): malloc result is not checked; an allocation
         * failure here would crash inside the fill kernel. */
        double *buf = malloc(sizeof(double) * (di*di*di*di*comp + cache_size));
#pragma omp for nowait schedule(dynamic)
        /* flatten the (i, j) shell-pair loop for dynamic scheduling */
        for (ij = 0; ij < nish*njsh; ij++) {
                i = ij / njsh;
                j = ij % njsh;
                (*fill)(intor, fprescreen, eri, buf, comp, i, j, shls_slice,
                        ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
|
imd_deform.c |
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2007 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/
/******************************************************************************
*
* imd_deform.c -- deform sample
*
******************************************************************************/
/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/
#include "imd.h"
#ifdef HOMDEF /* homogeneous deformation with pbc */
/*****************************************************************************
*
* lin_deform()
*
*****************************************************************************/
/*
 * lin_deform() -- apply the homogeneous (affine) deformation
 * x += scale * D.x to every atom position and to the simulation box
 * vectors, where the rows of D are dx, dy (and dz in 3D).
 * make_box() is called afterwards so derived box geometry stays
 * consistent.  Parallelized over cells with OpenMP.
 */
#ifdef TWOD
void lin_deform(vektor dx, vektor dy,            real scale)
#else
void lin_deform(vektor dx, vektor dy, vektor dz, real scale)
#endif
{
  int k;
  real tmpbox[3];
#ifdef DEBUG
  printf("in lindef, myid: %d scale %lf dx %lf %lf %lf dy %lf %lf %lf dz %lf %lf %lf \n",myid,scale,dx.x,dx.y,dx.z,dy.x,dy.y,dy.z,dz.x,dz.y,dz.z);fflush(stdout);
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
  /* transform all atom positions, cell by cell */
  for (k=0; k<NCELLS; ++k) {
    int i;
    cell *p;
    real tmport[3];
    p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      /* transform atom positions: tmport = D . ort */
#ifdef TWOD
      tmport[0] = dx.x * ORT(p,i,X) + dx.y * ORT(p,i,Y);
      tmport[1] = dy.x * ORT(p,i,X) + dy.y * ORT(p,i,Y);
#else
      tmport[0] = dx.x * ORT(p,i,X) + dx.y * ORT(p,i,Y) + dx.z * ORT(p,i,Z);
      tmport[1] = dy.x * ORT(p,i,X) + dy.y * ORT(p,i,Y) + dy.z * ORT(p,i,Z);
      tmport[2] = dz.x * ORT(p,i,X) + dz.y * ORT(p,i,Y) + dz.z * ORT(p,i,Z);
#endif
      ORT(p,i,X) += scale * tmport[0];
      ORT(p,i,Y) += scale * tmport[1];
#ifndef TWOD
      ORT(p,i,Z) += scale * tmport[2];
#endif
    }
  }
  /* transform first box vector */
  tmpbox[0] = scale * SPROD(dx,box_x);
  tmpbox[1] = scale * SPROD(dy,box_x);
#ifndef TWOD
  tmpbox[2] = scale * SPROD(dz,box_x);
#endif
  box_x.x += tmpbox[0];
  box_x.y += tmpbox[1];
#ifndef TWOD
  box_x.z += tmpbox[2];
#endif
  /* transform second box vector */
  tmpbox[0] = scale * SPROD(dx,box_y);
  tmpbox[1] = scale * SPROD(dy,box_y);
#ifndef TWOD
  tmpbox[2] = scale * SPROD(dz,box_y);
#endif
  box_y.x += tmpbox[0];
  box_y.y += tmpbox[1];
#ifndef TWOD
  box_y.z += tmpbox[2];
#endif
  /* transform third box vector */
#ifndef TWOD
  tmpbox[0] = scale * SPROD(dx,box_z);
  tmpbox[1] = scale * SPROD(dy,box_z);
  tmpbox[2] = scale * SPROD(dz,box_z);
  box_z.x += tmpbox[0];
  box_z.y += tmpbox[1];
  box_z.z += tmpbox[2];
#endif
  /* apply box changes */
  make_box();
#ifdef DAMP /* deform the stadium correspondingly */
  /* NOTE(review): these displacements are computed but never added back
     to 'center' -- looks like dead code or a missing update; confirm
     against the upstream IMD sources */
  tmpbox[0] = scale * SPROD(dx,center);
  tmpbox[1] = scale * SPROD(dy,center);
#ifndef TWOD
  tmpbox[2] = scale * SPROD(dz,center);
#endif
#endif
} /* lin_deform */
/*****************************************************************************
*
* relax_pressure()
*
*****************************************************************************/
/*
 * relax_pressure() -- take one pressure-relaxation step by deriving a
 * strain increment from the current stress (or scalar pressure) and
 * applying it with lin_deform() at rate relax_rate.
 *
 * With STRESS_TENS, relaxes toward the external tensor presstens_ext,
 * splitting the deviation into hydrostatic (bulk_module) and deviatoric
 * (shear_module) parts; otherwise only scalar pressure -> 0 via the
 * virial.  RELAX_AXIAL masks the strain to the directions selected in
 * relax_dirs.
 */
void relax_pressure()
{
  real Epot, Temp, vol;
#ifdef TWOD
  vektor dx = {0.0, 0.0},      dy = {0.0, 0.0};
#else
  vektor dx = {0.0, 0.0, 0.0}, dy = {0.0, 0.0, 0.0}, dz = {0.0, 0.0, 0.0};
#endif
  sym_tensor pt;
  real pp;
#ifdef STRESS_TENS
  /* here, we support relaxation to arbitrary external pressure tensor */
  calc_tot_presstens();
  /* deviation of the internal stress from the target, per component */
  pt.xx = tot_presstens.xx / volume - presstens_ext.xx;
  pt.yy = tot_presstens.yy / volume - presstens_ext.yy;
  pt.xy = tot_presstens.xy / volume - presstens_ext.xy;
#ifndef TWOD
  pt.zz = tot_presstens.zz / volume - presstens_ext.zz;
  pt.yz = tot_presstens.yz / volume - presstens_ext.yz;
  pt.zx = tot_presstens.zx / volume - presstens_ext.zx;
  /* we want to relax stresses e.g. orthogonal to a strained direction */
  /* pp = (pt.xx + pt.yy + pt.zz) / 3.0; */
  /* mean pressure over the selected relaxation directions only */
  pp = (pt.xx*relax_dirs.x + pt.yy*relax_dirs.y + pt.zz*relax_dirs.z) / (relax_dirs.x+ relax_dirs.y+relax_dirs.z);
#else
  pp = (pt.xx + pt.yy) / 2.0;
#endif
  if ((relax_mode == RELAX_FULL) || (relax_mode == RELAX_AXIAL)) {
    /* hydrostatic part via bulk modulus, deviatoric via shear modulus */
    dx.x = pp / bulk_module + (pt.xx - pp) / shear_module;
    dy.y = pp / bulk_module + (pt.yy - pp) / shear_module;
#ifndef TWOD
    dz.z = pp / bulk_module + (pt.zz - pp) / shear_module;
#endif
  } else {
    dx.x = pp / bulk_module;
    dy.y = pp / bulk_module;
#ifndef TWOD
    dz.z = pp / bulk_module;
#endif
  }
  if (relax_mode == RELAX_FULL) {
    /* full relaxation also removes the shear components */
    dx.y = dy.x = pt.xy / shear_module;
#ifndef TWOD
    dy.z = dz.y = pt.yz / shear_module;
    dz.x = dx.z = pt.zx / shear_module;
#endif
  }
#else /* not STRESS_TENS */
  if (ensemble == ENS_CG) {
    Temp = 0.0;
  } else {
    /* instantaneous temperature from the kinetic energy */
#ifdef UNIAX
    Temp = 2.0 * tot_kin_energy / (nactive + nactive_rot);
#elif defined(DAMP)
    Temp = 2.0 * tot_kin_energy / (nactive - n_damp);
#else
    Temp = 2.0 * tot_kin_energy / nactive;
#endif
  }
#ifdef STM
  Temp = 2.0 * tot_kin_energy / (nactive - n_stadium);
#endif
  vol = volume / natoms;
  /* scalar virial pressure */
  pressure = Temp / vol + virial / (DIM * volume);
  /* here, we support only relaxation to scalar pressure zero */
  dx.x = pressure / bulk_module ;
  dy.y = pressure / bulk_module ;
#ifndef TWOD
  dz.z = pressure / bulk_module ;
#endif
#endif /* not STRESS_TENS */
  if (relax_mode == RELAX_AXIAL) {
    /* keep only the directions selected for relaxation */
    dx.x *= relax_dirs.x;
    dy.y *= relax_dirs.y;
#ifndef TWOD
    dz.z *= relax_dirs.z;
#endif
  }
#ifdef TWOD
  lin_deform(dx, dy,     relax_rate);
#else
  lin_deform(dx, dy, dz, relax_rate);
#endif
}
#endif /* HOMDEF */
#ifdef DEFORM
/*****************************************************************************
*
* deform_sample()
*
*****************************************************************************/
/*
 * deform_sample() -- shift atoms of the "virtual" deformation types by
 * deform_size * deform_shift[type], optionally scaled per atom by a
 * shear profile.  For types with shear_def[type] == 1 the shift is
 * scaled by the projection of the atom's position (relative to
 * deform_base[type]) onto deform_shear[type]; all other types get the
 * uniform factor 1.  Parallelized over cells with OpenMP.
 */
void deform_sample(void)
{
  int k;
  /* loop over all atoms */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (k=0; k<NCELLS; ++k) {
    int i;
    cell *p;
    int sort;
    vektor ort;
    real shear;
    p = CELLPTR(k);
    for (i=0; i<p->n; ++i) {
      sort = VSORTE(p,i);      /* virtual atom type */
      if ( *(shear_def + sort) == 1 ) {
        /* position relative to the shear reference point of this type */
        ort.x = ORT(p,i,X) - (deform_base + sort)->x;
        ort.y = ORT(p,i,Y) - (deform_base + sort)->y;
#ifndef TWOD
        ort.z = ORT(p,i,Z) - (deform_base + sort)->z;
#endif
        /* position-dependent scaling of the shift */
        shear = SPROD( *(deform_shear + sort), ort);
      }
      else
        shear = 1.0;
      /* move particles with virtual types */
      ORT(p,i,X) += shear * deform_size * (deform_shift + sort)->x;
      ORT(p,i,Y) += shear * deform_size * (deform_shift + sort)->y;
#ifndef TWOD
      ORT(p,i,Z) += shear * deform_size * (deform_shift + sort)->z;
#endif
    }
  }
}
#endif /* DEFORM */
#ifdef CYCLE
/*
 * init_cycle() -- report the cyclic (sinusoidal) deformation settings
 * on rank 0 and verify that a quarter period of the oscillation,
 * T/4 = 1/(4*lindef_freq), is an integer number of timesteps.
 *
 * Reads globals: myid, lindef_freq, lindef_size, timestep.
 * Calls error() (which terminates the run) when the quarter period is
 * not commensurate with the timestep.
 *
 * Change vs. previous version: removed the unused locals i, A, a.
 */
void init_cycle(void)
{
  int n;
  double T;
  if (0==myid)
  {
    T=1.0/lindef_freq;
    printf("CYCLE: cyclic deformation with frequency f = %f -> T = 1/f = %f\n",lindef_freq,T);
    printf("CYCLE: desired strain amplitude: %f\n",lindef_size);
    n = (int) (T/4/timestep);
    printf("CYCLE: -> timesteps till max. strain: %d\n",n);
    /* exact FP comparison is intentional here: any mismatch means T/4
       is not an integer multiple of timestep */
    if (n*timestep*4.0 != T)
      error("CYCLE: 1/(4*lindef_freq) is not an integer value.");
  }
}
#endif
|
parallel_section_reduction.c | // Skip testing on 64 bit systems for now!
#ifndef __LP64__
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int
check_parallel_section_reduction (FILE * logFile)
{
int sum = 7;
int known_sum;
double dpt, dsum = 0;
double dknown_sum;
double dt = 0.5; /* base of geometric row for + and - test */
double rounding_error = 1.E-5;
int diff;
double ddiff;
int product = 1;
int known_product;
int logic_and = 1;
int bit_and = 1;
int logic_or = 0;
int bit_or = 0;
int exclusiv_bit_or = 0;
int logics[1000];
int i;
int result = 0;
/* int my_islarger; */
/*int is_larger=1; */
dt = 1. / 3.;
known_sum = (999 * 1000) / 2 + 7;
#pragma omp parallel sections private(i) reduction(+:sum)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
sum = sum + i;
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
sum = sum + i;
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
sum = sum + i;
}
}
}
if (known_sum != sum)
{
++result;
fprintf (logFile,
"Error in sum with integers: Result was %d instead of %d\n",
sum, known_sum);
}
diff = (999 * 1000) / 2;
#pragma omp parallel sections private(i) reduction(-:diff)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
diff = diff - i;
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
diff = diff - i;
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
diff = diff - i;
}
}
}
if (diff != 0)
{
result++;
fprintf (logFile,
"Error in Difference with integers: Result was %d instead of 0.\n",
diff);
}
for (i = 0; i < 20; ++i)
{
dpt *= dt;
}
dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel sections private(i) reduction(+:dsum)
{
#pragma omp section
{
for (i = 0; i < 6; ++i)
{
dsum += pow (dt, i);
}
}
#pragma omp section
{
for (i = 6; i < 12; ++i)
{
dsum += pow (dt, i);
}
}
#pragma omp section
{
for (i = 12; i < 20; ++i)
{
dsum += pow (dt, i);
}
}
}
if (fabs (dsum - dknown_sum) > rounding_error)
{
result++;
fprintf (logFile,
"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",
dsum, dknown_sum, dsum - dknown_sum);
}
dpt = 1;
for (i = 0; i < 20; ++i)
{
dpt *= dt;
}
fprintf (logFile, "\n");
ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel sections private(i) reduction(-:ddiff)
{
#pragma omp section
{
for (i = 0; i < 6; ++i)
{
ddiff -= pow (dt, i);
}
}
#pragma omp section
{
for (i = 6; i < 12; ++i)
{
ddiff -= pow (dt, i);
}
}
#pragma omp section
{
for (i = 12; i < 20; ++i)
{
ddiff -= pow (dt, i);
}
}
}
if (fabs (ddiff) > rounding_error)
{
result++;
fprintf (logFile,
"Error in Difference with doubles: Result was %E instead of 0.0\n",
ddiff);
}
known_product = 3628800;
#pragma omp parallel sections private(i) reduction(*:product)
{
#pragma omp section
{
for (i = 1; i < 3; i++)
{
product *= i;
}
}
#pragma omp section
{
for (i = 3; i < 7; i++)
{
product *= i;
}
}
#pragma omp section
{
for (i = 7; i < 11; i++)
{
product *= i;
}
}
}
if (known_product != product)
{
result++;
fprintf (logFile,
"Error in Product with integers: Result was %d instead of %d\n",
product, known_product);
}
for (i = 0; i < 1000; i++)
{
logics[i] = 1;
}
#pragma omp parallel sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_and = (logic_and && logics[i]);
}
}
}
if (!logic_and)
{
result++;
fprintf (logFile, "Error in logic AND part 1\n");
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_and = (logic_and && logics[i]);
}
}
}
if (logic_and)
{
result++;
fprintf (logFile, "Error in logic AND part 2\n");
}
for (i = 0; i < 1000; i++)
{
logics[i] = 0;
}
#pragma omp parallel sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_or = (logic_or || logics[i]);
}
}
}
if (logic_or)
{
result++;
fprintf (logFile, "\nError in logic OR part 1\n");
}
logic_or = 0;
logics[501] = 1;
#pragma omp parallel sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_or = (logic_or || logics[i]);
}
}
}
if (!logic_or)
{
result++;
fprintf (logFile, "Error in logic OR part 2\n");
}
for (i = 0; i < 1000; ++i)
{
logics[i] = 1;
}
#pragma omp parallel sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_and = (bit_and & logics[i]);
}
}
}
if (!bit_and)
{
result++;
fprintf (logFile, "Error in BIT AND part 1\n");
}
bit_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_and = bit_and & logics[i];
}
}
}
if (bit_and)
{
result++;
fprintf (logFile, "Error in BIT AND part 2\n");
}
for (i = 0; i < 1000; i++)
{
logics[i] = 0;
}
#pragma omp parallel sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_or = bit_or | logics[i];
}
}
}
if (bit_or)
{
result++;
fprintf (logFile, "Error in BIT OR part 1\n");
}
bit_or = 0;
logics[501] = 1;
#pragma omp parallel sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_or = bit_or | logics[i];
}
}
}
if (!bit_or)
{
result++;
fprintf (logFile, "Error in BIT OR part 2\n");
}
for (i = 0; i < 1000; i++)
{
logics[i] = 0;
}
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if (exclusiv_bit_or)
{
result++;
fprintf (logFile, "Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[501] = 1;
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if (!exclusiv_bit_or)
{
result++;
fprintf (logFile, "Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result); */
return (result == 0);
}
int
crosscheck_parallel_section_reduction (FILE * logFile)
{
int sum = 7;
int known_sum;
double dpt, dsum = 0;
double dknown_sum;
double dt = 0.5; /* base of geometric row for + and - test */
double rounding_error = 1.E-5;
int diff;
double ddiff;
int product = 1;
int known_product;
int logic_and = 1;
int bit_and = 1;
int logic_or = 0;
int bit_or = 0;
int exclusiv_bit_or;
int logics[1000];
int i;
int result = 0;
/* int my_islarger; */
/*int is_larger=1; */
known_sum = (999 * 1000) / 2 + 7;
dt = 1. / 3.;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
sum = sum + i;
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
sum = sum + i;
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
sum = sum + i;
}
}
}
if (known_sum != sum)
{
++result;
/*printf("\nError in Sum with integers\n"); */
}
diff = (999 * 1000) / 2;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
diff = diff - i;
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
diff = diff - i;
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
diff = diff - i;
}
}
}
if (diff != 0)
{
result++;
/*printf("\nError in Difference: Result was %d instead of 0.\n",diff); */
}
for (i = 0; i < 20; ++i)
{
dpt *= dt;
}
dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 6; ++i)
{
dsum += pow (dt, i);
}
}
#pragma omp section
{
for (i = 6; i < 12; ++i)
{
dsum += pow (dt, i);
}
}
#pragma omp section
{
for (i = 12; i < 20; ++i)
{
dsum += pow (dt, i);
}
}
}
if (fabs (dsum - dknown_sum) > rounding_error)
{
result++;
fprintf (logFile,
"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",
dsum, dknown_sum, dsum - dknown_sum);
}
dpt = 1;
for (i = 0; i < 20; ++i)
{
dpt *= dt;
}
fprintf (logFile, "\n");
ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 6; ++i)
{
ddiff -= pow (dt, i);
}
}
#pragma omp section
{
for (i = 6; i < 12; ++i)
{
ddiff -= pow (dt, i);
}
}
#pragma omp section
{
for (i = 12; i < 20; ++i)
{
ddiff -= pow (dt, i);
}
}
}
if (fabs (ddiff) > rounding_error)
{
result++;
fprintf (logFile,
"Error in Difference with doubles: Result was %E instead of 0.0\n",
ddiff);
}
known_product = 3628800;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 3; i++)
{
product *= i;
}
}
#pragma omp section
{
for (i = 3; i < 7; i++)
{
product *= i;
}
}
#pragma omp section
{
for (i = 7; i < 11; i++)
{
product *= i;
}
}
}
if (known_product != product)
{
result++;
/*printf("\nError in Product: Known Product: %d\tcalculated Product: %d\n\n",known_product,product); */
}
for (i = 0; i < 1000; i++)
{
logics[i] = 1;
}
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_and = (logic_and && logics[i]);
}
}
}
if (!logic_and)
{
result++;
/*printf("Error in AND part 1\n"); */
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_and = (logic_and && logics[i]);
}
}
}
if (logic_and)
{
result++;
/*printf("Error in AND part 2"); */
}
for (i = 0; i < 1000; i++)
{
logics[i] = 0;
}
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_or = (logic_or && logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_or = (logic_or && logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_or = (logic_or && logics[i]);
}
}
}
if (logic_or)
{
result++;
/*printf("\nError in OR part 1\n"); */
}
logic_or = 0;
logics[501] = 1;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 1; i < 300; i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; i++)
{
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; i++)
{
logic_or = (logic_or || logics[i]);
}
}
}
if (!logic_or)
{
result++;
/*printf("\nError in OR part 2\n"); */
}
for (i = 0; i < 1000; ++i)
{
logics[i] = 1;
}
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_and = (bit_and & logics[i]);
}
}
}
if (!bit_and)
{
result++;
/*printf("Error in BIT AND part 1\n"); */
}
bit_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_and = bit_and & logics[i];
}
}
}
if (bit_and)
{
result++;
/*printf("Error in BIT AND part 2"); */
}
for (i = 0; i < 1000; i++)
{
logics[i] = 0;
}
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_or = bit_or | logics[i];
}
}
}
if (bit_or)
{
result++;
/*printf("Error in BIT OR part 1\n"); */
}
bit_or = 0;
logics[501] = 1;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
bit_or = bit_or | logics[i];
}
}
}
if (!bit_or)
{
result++;
/*printf("Error in BIT OR part 2\n"); */
}
for (i = 0; i < 1000; i++)
{
logics[i] = 0;
}
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
exclusiv_bit_or = exclusiv_bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
exclusiv_bit_or = exclusiv_bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
exclusiv_bit_or = exclusiv_bit_or | logics[i];
}
}
}
if (exclusiv_bit_or)
{
result++;
/*printf("Error in EXCLUSIV BIT OR part 1\n"); */
}
exclusiv_bit_or = 0;
logics[501] = 1;
#pragma omp parallel sections private(i)
{
#pragma omp section
{
for (i = 0; i < 300; ++i)
{
exclusiv_bit_or = exclusiv_bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 300; i < 700; ++i)
{
exclusiv_bit_or = exclusiv_bit_or | logics[i];
}
}
#pragma omp section
{
for (i = 700; i < 1000; ++i)
{
exclusiv_bit_or = exclusiv_bit_or | logics[i];
}
}
}
if (!exclusiv_bit_or)
{
result++;
/*printf("Error in EXCLUSIV BIT OR part 2\n"); */
}
/*printf("\nResult:%d\n",result); */
return (result == 0);
}
#else
#warning "Not tested on 64 bit systems"
#endif
|
divsufsort.c | /*
* divsufsort.c for libdivsufsort-lite
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*- Compiler specifics -*/
#if defined(_MSC_VER)
# pragma warning(disable : 4244)
# pragma warning(disable : 4127) /* C4127 : Condition expression is constant */
#endif
/*- Dependencies -*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "divsufsort.h"
/*- Constants -*/
/* INLINE: normalize the inlining keyword so the rest of the file can use a
   single spelling regardless of what the build system defined. */
#if defined(INLINE)
# undef INLINE
#endif
#if !defined(INLINE)
# define INLINE __inline
#endif
/* ALPHABET_SIZE: number of distinct input symbols (256 for raw bytes). */
#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
# undef ALPHABET_SIZE
#endif
#if !defined(ALPHABET_SIZE)
# define ALPHABET_SIZE (256)
#endif
/* Sizes of the two character-counting bucket arrays: one entry per first
   character (A), one per character pair (B). */
#define BUCKET_A_SIZE (ALPHABET_SIZE)
#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
/* Below this group size the substring sorter switches to insertion sort;
   clamped to at least 1. */
#if defined(SS_INSERTIONSORT_THRESHOLD)
# if SS_INSERTIONSORT_THRESHOLD < 1
# undef SS_INSERTIONSORT_THRESHOLD
# define SS_INSERTIONSORT_THRESHOLD (1)
# endif
#else
# define SS_INSERTIONSORT_THRESHOLD (8)
#endif
/* SS_BLOCKSIZE: size of the merge buffer used by the substring sorter;
   clamped to [0, 32767].  0 selects the buffer-free code paths. */
#if defined(SS_BLOCKSIZE)
# if SS_BLOCKSIZE < 0
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (0)
# elif 32768 <= SS_BLOCKSIZE
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (32767)
# endif
#else
# define SS_BLOCKSIZE (1024)
#endif
/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
#if SS_BLOCKSIZE == 0
# define SS_MISORT_STACKSIZE (96)
#elif SS_BLOCKSIZE <= 4096
# define SS_MISORT_STACKSIZE (16)
#else
# define SS_MISORT_STACKSIZE (24)
#endif
#define SS_SMERGE_STACKSIZE (32)
#define TR_INSERTIONSORT_THRESHOLD (8)
#define TR_STACKSIZE (64)
/*- Macros -*/
/* SWAP expects a scratch variable `t` of the element type in the caller's
   scope; MIN/MAX evaluate their arguments more than once. */
#ifndef SWAP
# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
#endif /* SWAP */
#ifndef MIN
# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif /* MIN */
#ifndef MAX
# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif /* MAX */
/* Explicit work-stack helpers: they expect local variables `stack[]` and
   `ssize`, and STACK_SIZE defined by the enclosing function.  The POP
   variants return from the caller when the stack is empty. */
#define STACK_PUSH(_a, _b, _c, _d)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize++].d = (_d);\
} while(0)
#define STACK_PUSH5(_a, _b, _c, _d, _e)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
} while(0)
#define STACK_POP(_a, _b, _c, _d)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d;\
} while(0)
#define STACK_POP5(_a, _b, _c, _d, _e)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
} while(0)
/* Bucket accessors; callers provide bucket_A/bucket_B arrays.  For the
   byte alphabet the pair index is packed into 16 bits. */
#define BUCKET_A(_c0) bucket_A[(_c0)]
#if ALPHABET_SIZE == 256
#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
#else
#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
#endif
/*- Private Functions -*/
/* lg_table[i] = floor(log2(i)) for 1 <= i <= 255; lg_table[0] = -1. */
static const int lg_table[256]= {
-1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
};
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
/* Integer base-2 logarithm of n (floor(log2(n)), n > 0), computed byte by
   byte from lg_table.  The preprocessor selects the widest lookup needed
   for the configured SS_BLOCKSIZE. */
static INLINE
int
ss_ilg(int n) {
#if SS_BLOCKSIZE == 0
return (n & 0xffff0000) ?
((n & 0xff000000) ?
24 + lg_table[(n >> 24) & 0xff] :
16 + lg_table[(n >> 16) & 0xff]) :
((n & 0x0000ff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff]);
#elif SS_BLOCKSIZE < 256
return lg_table[n];
#else
return (n & 0xff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff];
#endif
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
#if SS_BLOCKSIZE != 0
/* sqq_table[i] = floor(sqrt(i * 256)); seed table for the integer square
   root below (e.g. sqq_table[1] = 16, sqq_table[64] = 128). */
static const int sqq_table[256] = {
0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61,
64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89,
90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109,
110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
};
/* Integer square root of x (floor(sqrt(x))), clamped to SS_BLOCKSIZE.
   Seeds an estimate from sqq_table scaled by the magnitude of x, then
   refines with Newton iterations for larger inputs. */
static INLINE
int
ss_isqrt(int x) {
int y, e;
if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
/* e = floor(log2(x)), via byte-wise lg_table lookup. */
e = (x & 0xffff0000) ?
((x & 0xff000000) ?
24 + lg_table[(x >> 24) & 0xff] :
16 + lg_table[(x >> 16) & 0xff]) :
((x & 0x0000ff00) ?
8 + lg_table[(x >> 8) & 0xff] :
0 + lg_table[(x >> 0) & 0xff]);
if(e >= 16) {
y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
if(e >= 24) { y = (y + 1 + x / y) >> 1; }   /* Newton refinement */
y = (y + 1 + x / y) >> 1;
} else if(e >= 8) {
y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
} else {
return sqq_table[x] >> 4;
}
/* The estimate may be one too high; correct it. */
return (x < (y * y)) ? y - 1 : y;
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Compares two suffixes. */
/* Compare the suffixes referenced by *p1 and *p2, starting depth characters
   in.  Each substring ends at T + *(p + 1) + 2; a suffix that runs out
   first compares smaller.  Returns <0, 0 or >0 like strcmp. */
static INLINE
int
ss_compare(const unsigned char *T,
           const int *p1, const int *p2,
           int depth) {
  const unsigned char *s1 = T + depth + *p1;
  const unsigned char *s2 = T + depth + *p2;
  const unsigned char *end1 = T + *(p1 + 1) + 2;
  const unsigned char *end2 = T + *(p2 + 1) + 2;
  /* Skip the common prefix. */
  while((s1 < end1) && (s2 < end2) && (*s1 == *s2)) {
    ++s1;
    ++s2;
  }
  if(s1 < end1) {
    return (s2 < end2) ? (*s1 - *s2) : 1;
  }
  return (s2 < end2) ? -1 : 0;
}
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
/* Insertionsort for small size groups */
/* Sorts the group [first, last) by the substrings selected via PA at the
   given depth.  A negative (bitwise-complemented) entry marks a suffix
   equal to its successor — the ~-encoding used throughout this sorter. */
static
void
ss_insertionsort(const unsigned char *T, const int *PA,
int *first, int *last, int depth) {
int *i, *j;
int t;
int r;
for(i = last - 2; first <= i; --i) {
/* Shift larger entries right; runs of ~-marked (equal) entries move as
   a unit with their representative. */
for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
if(last <= j) { break; }
}
/* r == 0: equal to the following suffix, so mark it. */
if(r == 0) { *j = ~*j; }
*(j - 1) = t;
}
}
#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
/* Sift SA[i] down in a max-heap of size `size`, ordered by the key
   Td[PA[.]].  Helper for ss_heapsort; NOTE(review): the unchecked read of
   the right child SA[j] relies on size being odd, which ss_heapsort
   arranges — confirm before reusing elsewhere. */
static INLINE
void
ss_fixdown(const unsigned char *Td, const int *PA,
int *SA, int i, int size) {
int j, k;
int v;
int c, d, e;
for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
/* k/d track the larger of the two children. */
d = Td[PA[SA[k = j++]]];
if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }
if(d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
/* Sorts SA[0..size) by the key Td[PA[.]].  The heap is built over an odd
   number of elements m (the last element of an even-sized array is handled
   separately) so every internal node has exactly two children. */
static
void
ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
int i, m;
int t;
m = size;
if((size % 2) == 0) {
m--;
/* Park the larger of the dropped element and its parent at SA[m]. */
if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
}
/* Build the max-heap bottom-up. */
for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
/* Repeatedly extract the maximum to the end of the array. */
for(i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
ss_fixdown(Td, PA, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
/* Return the pointer among v1, v2, v3 whose key Td[PA[*v]] is the median
   of the three. */
static INLINE
int *
ss_median3(const unsigned char *Td, const int *PA,
           int *v1, int *v2, int *v3) {
  if(Td[PA[*v1]] > Td[PA[*v2]]) {
    int *swp = v1;
    v1 = v2;
    v2 = swp;
  }
  /* Here key(v1) <= key(v2); decide between v2 and the remaining pair. */
  if(Td[PA[*v2]] > Td[PA[*v3]]) {
    return (Td[PA[*v1]] > Td[PA[*v3]]) ? v1 : v3;
  }
  return v2;
}
/* Returns the median of five elements. */
/* Sorting-network median of five pointers by key Td[PA[*v]]; each SWAP
   exchanges the pointers themselves (t is the scratch required by SWAP).
   The comparison order is a fixed network — do not reorder. */
static INLINE
int *
ss_median5(const unsigned char *Td, const int *PA,
int *v1, int *v2, int *v3, int *v4, int *v5) {
int *t;
if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
return v3;
}
/* Returns the pivot element. */
/* Choose a pivot position in [first, last): median of three for small
   ranges, median of five for medium ranges, and a pseudo-median of nine
   (median of three medians-of-three) for large ranges. */
static INLINE
int *
ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
  int len = last - first;
  int *mid = first + len / 2;
  if(len <= 32) {
    return ss_median3(Td, PA, first, mid, last - 1);
  }
  if(len <= 512) {
    int step = len >> 2;
    return ss_median5(Td, PA, first, first + step, mid, last - 1 - step, last - 1);
  }
  {
    int step = len >> 3;
    int *lo = ss_median3(Td, PA, first, first + step, first + (step << 1));
    int *mi = ss_median3(Td, PA, mid - step, mid, mid + step);
    int *hi = ss_median3(Td, PA, last - 1 - (step << 1), last - 1 - step, last - 1);
    return ss_median3(Td, PA, lo, mi, hi);
  }
}
/*---------------------------------------------------------------------------*/
/* Binary partition for substrings. */
/* Partitions [first, last) on the predicate
   (PA[*x] + depth) >= (PA[*x + 1] + 1) and returns the split point.
   Entries that satisfy it are moved to the left half and ~-marked;
   NOTE(review): the predicate appears to test whether the substring is
   exhausted at this depth — confirm against the PA layout. */
static INLINE
int *
ss_partition(const int *PA,
int *first, int *last, int depth) {
int *a, *b;
int t;
for(a = first - 1, b = last;;) {
/* Advance a over (and mark) left-side entries, retreat b over right-side
   entries, then exchange the out-of-place pair. */
for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { }
if(b <= a) { break; }
t = ~*b;
*b = *a;
*a = t;
}
if(first < a) { *first = ~*first; }
return a;
}
/* Multikey introsort for medium size groups. */
/* Three-way-radix introsort of the group [first, last) on the character at
   position `depth`.  Uses an explicit stack (no recursion); falls back to
   insertion sort for tiny groups and to heapsort when the depth limit is
   exhausted.  Equal-to-successor entries carry the ~-encoding. */
static
void
ss_mintrosort(const unsigned char *T, const int *PA,
int *first, int *last,
int depth) {
#define STACK_SIZE SS_MISORT_STACKSIZE
struct { int *a, *b, c; int d; } stack[STACK_SIZE];
const unsigned char *Td;
int *a, *b, *c, *d, *e, *f;
int s, t;
int ssize;
int limit;
int v, x = 0;
for(ssize = 0, limit = ss_ilg(last - first);;) {
/* Small group: finish with insertion sort and pop the next range. */
if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
#if 1 < SS_INSERTIONSORT_THRESHOLD
if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
#endif
STACK_POP(first, last, depth, limit);
continue;
}
Td = T + depth;
/* Depth budget exhausted: guarantee O(n log n) via heapsort. */
if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
if(limit < 0) {
/* After heapsort, peel off runs of equal leading characters. */
for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
if((x = Td[PA[*a]]) != v) {
if(1 < (a - first)) { break; }
v = x;
first = a;
}
}
if(Td[PA[*first] - 1] < v) {
first = ss_partition(PA, first, a, depth);
}
/* Recurse (via stack) into the smaller side first. */
if((a - first) <= (last - a)) {
if(1 < (a - first)) {
STACK_PUSH(a, last, depth, -1);
last = a, depth += 1, limit = ss_ilg(a - first);
} else {
first = a, limit = -1;
}
} else {
if(1 < (last - a)) {
STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
first = a, limit = -1;
} else {
last = a, depth += 1, limit = ss_ilg(a - first);
}
}
continue;
}
/* choose pivot */
a = ss_pivot(Td, PA, first, last);
v = Td[PA[*a]];
SWAP(*first, *a);
/* partition (Bentley–McIlroy style three-way: equals collect at the
   ends and are swapped into the middle afterwards) */
for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
if(((a = b) < last) && (x < v)) {
for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
}
for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
if((b < (d = c)) && (x > v)) {
for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
for(; b < c;) {
SWAP(*b, *c);
for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
if(a <= d) {
/* Move the equal-to-pivot blocks from the ends into the middle. */
c = b - 1;
if((s = a - first) > (t = b - a)) { s = t; }
for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if((s = d - c) > (t = last - d - 1)) { s = t; }
for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
a = first + (b - a), c = last - (d - c);
b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
/* Push the two larger sub-ranges, continue with the smallest. */
if((a - first) <= (last - c)) {
if((last - c) <= (c - b)) {
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
STACK_PUSH(c, last, depth, limit);
last = a;
} else if((a - first) <= (c - b)) {
STACK_PUSH(c, last, depth, limit);
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
last = a;
} else {
STACK_PUSH(c, last, depth, limit);
STACK_PUSH(first, a, depth, limit);
first = b, last = c, depth += 1, limit = ss_ilg(c - b);
}
} else {
if((a - first) <= (c - b)) {
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
STACK_PUSH(first, a, depth, limit);
first = c;
} else if((last - c) <= (c - b)) {
STACK_PUSH(first, a, depth, limit);
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
first = c;
} else {
STACK_PUSH(first, a, depth, limit);
STACK_PUSH(c, last, depth, limit);
first = b, last = c, depth += 1, limit = ss_ilg(c - b);
}
}
} else {
/* All keys equal to the pivot: descend one character deeper. */
limit += 1;
if(Td[PA[*first] - 1] < v) {
first = ss_partition(PA, first, last, depth);
limit = ss_ilg(last - first);
}
depth += 1;
}
}
#undef STACK_SIZE
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
/*---------------------------------------------------------------------------*/
#if SS_BLOCKSIZE != 0
/* Exchange the first n elements of the two int blocks a and b. */
static INLINE
void
ss_blockswap(int *a, int *b, int n) {
  int i, tmp;
  for(i = 0; i < n; ++i) {
    tmp = a[i];
    a[i] = b[i];
    b[i] = tmp;
  }
}
/* Rotate [first, last) left so that the element at `middle` becomes the
   first element, using O(1) extra space.  When the two parts are equal in
   size a single block swap suffices; otherwise elements are cycled through
   one temporary, shrinking the unrotated region each pass.  The exact
   statement order in the inner loops is load-bearing — do not reorder. */
static INLINE
void
ss_rotate(int *first, int *middle, int *last) {
int *a, *b, t;
int l, r;
l = middle - first, r = last - middle;
for(; (0 < l) && (0 < r);) {
if(l == r) { ss_blockswap(first, middle, l); break; }
if(l < r) {
/* Right part is larger: cycle from the back. */
a = last - 1, b = middle - 1;
t = *a;
do {
*a-- = *b, *b-- = *a;
if(b < first) {
*a = t;
last = a;
if((r -= l + 1) <= l) { break; }
a -= 1, b = middle - 1;
t = *a;
}
} while(1);
} else {
/* Left part is larger: cycle from the front. */
a = first, b = middle;
t = *a;
do {
*a++ = *b, *b++ = *a;
if(last <= b) {
*a = t;
first = a + 1;
if((l -= r + 1) <= r) { break; }
a += 1, b = middle;
t = *a;
}
} while(1);
}
}
}
/*---------------------------------------------------------------------------*/
/* Merge the two sorted runs [first, middle) and [middle, last) in place:
   binary-search the insertion point of the last element of the right run
   inside the left run, rotate it into position, and repeat.  Negative
   (~-marked) entries denote equal-to-successor flags and are honored by
   comparing on their complement. */
static
void
ss_inplacemerge(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int depth) {
const int *p;
int *a, *b;
int len, half;
int q, r;
int x;
for(;;) {
/* x records whether the probe element was ~-marked. */
if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
else { x = 0; p = PA + *(last - 1); }
/* Binary search for the insertion point in [first, middle). */
for(a = first, len = middle - first, half = len >> 1, r = -1;
0 < len;
len = half, half >>= 1) {
b = a + half;
q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
if(q < 0) {
a = b + 1;
half -= (len & 1) ^ 1;
} else {
r = q;
}
}
if(a < middle) {
if(r == 0) { *a = ~*a; }
ss_rotate(a, middle, last);
last -= middle - a;
middle = a;
if(first == middle) { break; }
}
--last;
/* Skip the run of ~-marked entries belonging to the moved element. */
if(x != 0) { while(*--last < 0) { } }
if(middle == last) { break; }
}
}
/*---------------------------------------------------------------------------*/
/* Merge-forward with internal buffer. */
/* Merges [first, middle) and [middle, last), walking left to right.  The
   left run is first swapped into buf (which must hold middle-first ints);
   t holds the value displaced at the write cursor and is restored on exit.
   ~-marked runs are copied through without re-comparing. */
static
void
ss_mergeforward(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int depth) {
int *a, *b, *c, *bufend;
int t;
int r;
bufend = buf + (middle - first) - 1;
ss_blockswap(buf, first, middle - first);
for(t = *(a = first), b = buf, c = middle;;) {
r = ss_compare(T, PA + *b, PA + *c, depth);
if(r < 0) {
/* Take from the buffered left run. */
do {
*a++ = *b;
if(bufend <= b) { *bufend = t; return; }
*b++ = *a;
} while(*b < 0);
} else if(r > 0) {
/* Take from the right run. */
do {
*a++ = *c, *c++ = *a;
if(last <= c) {
while(b < bufend) { *a++ = *b, *b++ = *a; }
*a = *b, *b = t;
return;
}
} while(*c < 0);
} else {
/* Equal heads: ~-mark the right element, then emit both runs. */
*c = ~*c;
do {
*a++ = *b;
if(bufend <= b) { *bufend = t; return; }
*b++ = *a;
} while(*b < 0);
do {
*a++ = *c, *c++ = *a;
if(last <= c) {
while(b < bufend) { *a++ = *b, *b++ = *a; }
*a = *b, *b = t;
return;
}
} while(*c < 0);
}
}
}
/* Merge-backward with internal buffer. */
/* Mirror image of ss_mergeforward: merges [first, middle) and
   [middle, last) right to left.  The right run is swapped into buf (which
   must hold last-middle ints); p1/p2 point at the current comparison keys,
   and bits 1/2 of x remember whether those heads were ~-marked so the
   marked runs are flushed before the next comparison. */
static
void
ss_mergebackward(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int depth) {
const int *p1, *p2;
int *a, *b, *c, *bufend;
int t;
int r;
int x;
bufend = buf + (last - middle) - 1;
ss_blockswap(buf, middle, last - middle);
x = 0;
if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; }
else { p1 = PA + *bufend; }
if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
else { p2 = PA + *(middle - 1); }
for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
r = ss_compare(T, p1, p2, depth);
if(0 < r) {
/* Take from the buffered right run. */
if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
*a-- = *b;
if(b <= buf) { *buf = t; break; }
*b-- = *a;
if(*b < 0) { p1 = PA + ~*b; x |= 1; }
else { p1 = PA + *b; }
} else if(r < 0) {
/* Take from the left run. */
if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
*a-- = *c, *c-- = *a;
if(c < first) {
while(buf < b) { *a-- = *b, *b-- = *a; }
*a = *b, *b = t;
break;
}
if(*c < 0) { p2 = PA + ~*c; x |= 2; }
else { p2 = PA + *c; }
} else {
/* Equal heads: emit the right element ~-marked, then the left. */
if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
*a-- = ~*b;
if(b <= buf) { *buf = t; break; }
*b-- = *a;
if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
*a-- = *c, *c-- = *a;
if(c < first) {
while(buf < b) { *a-- = *b, *b-- = *a; }
*a = *b, *b = t;
break;
}
if(*b < 0) { p1 = PA + ~*b; x |= 1; }
else { p1 = PA + *b; }
if(*c < 0) { p2 = PA + ~*c; x |= 2; }
else { p2 = PA + *c; }
}
}
}
/* D&C based merge. */
/* Divide-and-conquer merge of [first, middle) and [middle, last) using at
   most bufsize ints of scratch space.  When one side fits in the buffer it
   is merged directly (ss_mergeforward / ss_mergebackward); otherwise a
   binary search finds m so the two blocks of length m around middle can be
   rotated with ss_blockswap, and the two sub-merges are processed via an
   explicit stack (STACK_PUSH/STACK_POP are defined elsewhere in this file).
   check/next carry flags describing tie-marking (~) state at the
   sub-range boundaries, consumed by MERGE_CHECK. */
static
void
ss_swapmerge(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int bufsize, int depth) {
#define STACK_SIZE SS_SMERGE_STACKSIZE
#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
#define MERGE_CHECK(a, b, c)\
do {\
if(((c) & 1) ||\
(((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
*(a) = ~*(a);\
}\
if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
*(b) = ~*(b);\
}\
} while(0)
struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
int *l, *r, *lm, *rm;
int m, len, half;
int ssize;
int check, next;
for(check = 0, ssize = 0;;) {
/* Right side fits in the buffer: merge backward and pop the next job. */
if((last - middle) <= bufsize) {
if((first < middle) && (middle < last)) {
ss_mergebackward(T, PA, first, middle, last, buf, depth);
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
continue;
}
/* Left side fits in the buffer: merge forward. */
if((middle - first) <= bufsize) {
if(first < middle) {
ss_mergeforward(T, PA, first, middle, last, buf, depth);
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
continue;
}
/* Binary search for the largest m such that swapping the m elements on
   either side of middle keeps both halves locally sorted. */
for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
0 < len;
len = half, half >>= 1) {
if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
m += half + 1;
half -= (len & 1) ^ 1;
}
}
if(0 < m) {
lm = middle - m, rm = middle + m;
ss_blockswap(lm, middle, m);
l = r = middle, next = 0;
/* Preserve/restore tie marks straddling the rotation boundary. */
if(rm < last) {
if(*rm < 0) {
*rm = ~*rm;
if(first < lm) { for(; *--l < 0;) { } next |= 4; }
next |= 1;
} else if(first < lm) {
for(; *r < 0; ++r) { }
next |= 2;
}
}
/* Recurse into the smaller half via the stack, loop on the larger. */
if((l - first) <= (last - r)) {
STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
middle = lm, last = l, check = (check & 3) | (next & 4);
} else {
if((next & 2) && (r == middle)) { next ^= 6; }
STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
first = r, middle = rm, check = (next & 3) | (check & 4);
}
} else {
/* Nothing to rotate: the halves are already in order; just fix marks. */
if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
*middle = ~*middle;
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
}
}
#undef STACK_SIZE
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Substring sort */
static
void
sssort(const unsigned char *T, const int *PA,
int *first, int *last,
int *buf, int bufsize,
int depth, int n, int lastsuffix) {
int *a;
#if SS_BLOCKSIZE != 0
int *b, *middle, *curbuf;
int j, k, curbufsize, limit;
#endif
int i;
if(lastsuffix != 0) { ++first; }
#if SS_BLOCKSIZE == 0
ss_mintrosort(T, PA, first, last, depth);
#else
if((bufsize < SS_BLOCKSIZE) &&
(bufsize < (last - first)) &&
(bufsize < (limit = ss_isqrt(last - first)))) {
if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
buf = middle = last - limit, bufsize = limit;
} else {
middle = last, limit = 0;
}
for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
#endif
curbufsize = last - (a + SS_BLOCKSIZE);
curbuf = a + SS_BLOCKSIZE;
if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
}
}
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, a, middle, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, a, middle, depth);
#endif
for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
if(i & 1) {
ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
a -= k;
}
}
if(limit != 0) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, middle, last, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, middle, last, depth);
#endif
ss_inplacemerge(T, PA, first, middle, last, depth);
}
#endif
if(lastsuffix != 0) {
/* Insert last type B* suffix. */
int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
for(a = first, i = *(first - 1);
(a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
++a) {
*(a - 1) = *a;
}
*(a - 1) = i;
}
}
/*---------------------------------------------------------------------------*/
/* Floor of log2(n) computed with the 8-bit lg_table lookup; the branches
   simply locate which byte of n contains the most-significant set bit. */
static INLINE
int
tr_ilg(int n) {
  if(n & 0xffff0000) {
    if(n & 0xff000000) { return 24 + lg_table[(n >> 24) & 0xff]; }
    return 16 + lg_table[(n >> 16) & 0xff];
  }
  if(n & 0x0000ff00) { return 8 + lg_table[(n >> 8) & 0xff]; }
  return lg_table[n & 0xff];
}
/*---------------------------------------------------------------------------*/
/* Simple insertionsort for small size groups. */
/* Insertion sort of [first, last) ordered by ISAd rank.  An element equal
   in rank to its final left neighbour marks that neighbour by one's
   complement (tandem-repeat tie marking); already-marked (negative)
   entries are skipped over in one shift burst. */
static
void
tr_insertionsort(const int *ISAd, int *first, int *last) {
  int *cur, *prev;
  int key, diff;

  for(cur = first + 1; cur < last; ++cur) {
    key = *cur;
    prev = cur - 1;
    while((diff = ISAd[key] - ISAd[*prev]) < 0) {
      /* Shift right past the neighbour and any marked entries behind it. */
      do { *(prev + 1) = *prev; } while((first <= --prev) && (*prev < 0));
      if(prev < first) { break; }
    }
    if(diff == 0) { *prev = ~*prev; }  /* equal ranks: mark the neighbour */
    *(prev + 1) = key;
  }
}
/*---------------------------------------------------------------------------*/
/* Sift-down for a binary max-heap over SA[0..size), keyed by ISAd rank.
   Restores the heap property at index i assuming both subtrees are heaps.
   Note: the right child SA[j] is read unconditionally after k = j++, so
   every visited left child must have a sibling — tr_heapsort arranges this
   by trimming `size` to an odd value before calling. */
static INLINE
void
tr_fixdown(const int *ISAd, int *SA, int i, int size) {
int j, k;
int v;
int c, d, e;
for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
/* k selects the larger-ranked of the two children. */
d = ISAd[SA[k = j++]];
if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
if(d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
/* Heapsort of SA[0..size) by ISAd rank.  When size is even, the last
   element is handled separately (m = size - 1) so tr_fixdown can always
   read both children unconditionally; the pre-swap plus the post-heapify
   swap fold it back into the sorted order. */
static
void
tr_heapsort(const int *ISAd, int *SA, int size) {
int i, m;
int t;
m = size;
if((size % 2) == 0) {
m--;
if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
}
/* Build the max-heap over SA[0..m). */
for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
/* Repeatedly extract the maximum to the end of the array. */
for(i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
tr_fixdown(ISAd, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
/* Median of three positions, compared by ISAd rank; returns the pointer
   whose element has the middle rank. */
static INLINE
int *
tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
  int *t;

  if(ISAd[*v1] > ISAd[*v2]) { t = v1; v1 = v2; v2 = t; }
  if(ISAd[*v2] > ISAd[*v3]) {
    return (ISAd[*v1] > ISAd[*v3]) ? v1 : v3;
  }
  return v2;
}
/* Returns the median of five elements. */
/* Median of five positions, compared by ISAd rank.  A fixed comparison
   network narrows the candidates down to v3/v4, whose smaller-ranked
   member is the overall median. */
static INLINE
int *
tr_median5(const int *ISAd,
           int *v1, int *v2, int *v3, int *v4, int *v5) {
  int *t;

  if(ISAd[*v2] > ISAd[*v3]) { t = v2; v2 = v3; v3 = t; }
  if(ISAd[*v4] > ISAd[*v5]) { t = v4; v4 = v5; v5 = t; }
  if(ISAd[*v2] > ISAd[*v4]) { t = v2; v2 = v4; v4 = t; t = v3; v3 = v5; v5 = t; }
  if(ISAd[*v1] > ISAd[*v3]) { t = v1; v1 = v3; v3 = t; }
  if(ISAd[*v1] > ISAd[*v4]) { t = v1; v1 = v4; v4 = t; t = v3; v3 = v5; v5 = t; }
  return (ISAd[*v3] > ISAd[*v4]) ? v4 : v3;
}
/* Returns the pivot element. */
/* Pseudo-median pivot selection for tr_introsort: median-of-3 for ranges
   up to 32 elements, median-of-5 up to 512, and a median of three
   medians-of-3 (ninther) for anything larger. */
static INLINE
int *
tr_pivot(const int *ISAd, int *first, int *last) {
  int *middle;
  int t;

  t = last - first;
  middle = first + t / 2;
  if(t <= 512) {
    if(t <= 32) { return tr_median3(ISAd, first, middle, last - 1); }
    t >>= 2;
    return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
  }
  t >>= 3;
  first  = tr_median3(ISAd, first, first + t, first + (t << 1));
  middle = tr_median3(ISAd, middle - t, middle, middle + t);
  last   = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
  return tr_median3(ISAd, first, middle, last);
}
/*---------------------------------------------------------------------------*/
/* Work budget used by trsort to bound how much effort tr_introsort may
   spend on a group before giving up (the group is then retried later at a
   doubled depth). */
typedef struct _trbudget_t trbudget_t;
struct _trbudget_t {
int chance;  /* refills left before further charges start failing */
int remain;  /* units remaining in the current refill */
int incval;  /* size of each refill (the initial `remain`) */
int count;   /* units that could not be paid for once chance hit 0 */
};
/* Initialize the introsort work budget: `chance` refills of `incval`
   units each.  Also clears `count` — trbudget_check accumulates unpaid
   work there, and leaving it indeterminate forces every caller to
   remember to zero it manually (trsort does, but new callers may not). */
static INLINE
void
trbudget_init(trbudget_t *budget, int chance, int incval) {
  budget->chance = chance;
  budget->remain = budget->incval = incval;
  budget->count = 0;
}
/* Try to charge `size` units of work against the budget.
   Returns 1 if the work may proceed, 0 once the budget is exhausted
   (in which case the unpaid amount is accumulated in budget->count). */
static INLINE
int
trbudget_check(trbudget_t *budget, int size) {
  if(size <= budget->remain) {
    budget->remain -= size;
    return 1;
  }
  if(budget->chance == 0) {
    /* Out of refills: record the work we refused to pay for. */
    budget->count += size;
    return 0;
  }
  /* Spend one refill; the shortfall carries over into the new balance. */
  budget->remain += budget->incval - size;
  budget->chance -= 1;
  return 1;
}
/*---------------------------------------------------------------------------*/
/* Ternary (three-way) partition of [first, last) by ISAd rank against the
   pivot value v, Bentley–McIlroy style: equal elements are parked at the
   two ends during the scan and swapped into the middle afterwards.
   On return *pa/*pb bracket the "== v" band: [first, *pa) < v,
   [*pa, *pb) == v, [*pb, last) > v.  Scanning starts at `middle` so the
   already-examined prefix [first, middle) is skipped. */
static INLINE
void
tr_partition(const int *ISAd,
int *first, int *middle, int *last,
int **pa, int **pb, int v) {
int *a, *b, *c, *d, *e, *f;
int t, s;
int x = 0;
/* Skip the leading run of pivot-equal elements, then sweep from the left. */
for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
if(((a = b) < last) && (x < v)) {
for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
}
/* Mirror sweep from the right. */
for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
if((b < (d = c)) && (x > v)) {
for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
for(; b < c;) {
SWAP(*b, *c);
for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
/* Rotate the parked equal elements from the ends into the middle. */
if(a <= d) {
c = b - 1;
if((s = a - first) > (t = b - a)) { s = t; }
for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if((s = d - c) > (t = last - d - 1)) { s = t; }
for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
first += (b - a), last -= (d - c);
}
*pa = first, *pb = last;
}
/* Tandem-repeat copy pass: fill the unsorted middle partition [a, b) by
   scanning the already-sorted neighbours.  Any suffix s whose successor
   (s + depth) has rank v (i.e. lies in the middle group) is appended in
   the order its successor appears, first left-to-right from [first, a),
   then right-to-left from [b, last), and its rank is finalized. */
static
void
tr_copy(int *ISA, const int *SA,
int *first, int *a, int *b, int *last,
int depth) {
/* sort suffixes of middle partition
by using sorted order of suffixes of left and right partition. */
int *c, *d, *e;
int s, v;
v = b - SA - 1;
for(c = first, d = a - 1; c <= d; ++c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
ISA[s] = d - SA;
}
}
for(c = last - 1, e = d + 1, d = b; e < d; --c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
ISA[s] = d - SA;
}
}
}
/* Like tr_copy, but used when the budget was exhausted: suffixes placed
   into the middle partition receive a *group* rank (newrank) shared with
   neighbours whose successors have equal rank, instead of a unique final
   position, so the group can be re-sorted in a later round.  The second
   loop re-coalesces ranks over the freshly extended left part. */
static
void
tr_partialcopy(int *ISA, const int *SA,
int *first, int *a, int *b, int *last,
int depth) {
int *c, *d, *e;
int s, v;
int rank, lastrank, newrank = -1;
v = b - SA - 1;
lastrank = -1;
for(c = first, d = a - 1; c <= d; ++c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
rank = ISA[s + depth];
if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
ISA[s] = newrank;
}
}
lastrank = -1;
for(e = d; first <= e; --e) {
rank = ISA[*e];
if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
if(newrank != rank) { ISA[*e] = newrank; }
}
lastrank = -1;
for(c = last - 1, e = d + 1, d = b; e < d; --c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
rank = ISA[s + depth];
if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
ISA[s] = newrank;
}
}
}
/* Introsort over a group of suffixes [first, last), ordered by the rank of
   the suffix `depth` characters ahead (ISAd = ISA + depth; incr = depth).
   Recursion is simulated with an explicit 5-field stack (STACK_PUSH5 /
   STACK_POP5, defined elsewhere in this file).  `limit` doubles as the
   introsort depth limit and as a state code when negative:
     -1 = tandem-repeat partition, -2 = tandem-repeat copy,
     -3 = finalize a sorted partition.
   trlink indexes the pending copy frame whose d-field is set to -1 when
   the budget runs out, switching it from tr_copy to tr_partialcopy. */
static
void
tr_introsort(int *ISA, const int *ISAd,
int *SA, int *first, int *last,
trbudget_t *budget) {
#define STACK_SIZE TR_STACKSIZE
struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];
int *a, *b, *c;
int t;
int v, x = 0;
int incr = ISAd - ISA;
int limit, next;
int ssize, trlink = -1;
for(ssize = 0, limit = tr_ilg(last - first);;) {
if(limit < 0) {
if(limit == -1) {
/* tandem repeat partition */
tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
/* update ranks */
if(a < last) {
for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
}
if(b < last) {
for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
}
/* push */
if(1 < (b - a)) {
STACK_PUSH5(NULL, a, b, 0, 0);
STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
trlink = ssize - 2;
}
if((a - first) <= (last - b)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
last = a, limit = tr_ilg(a - first);
} else if(1 < (last - b)) {
first = b, limit = tr_ilg(last - b);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
first = b, limit = tr_ilg(last - b);
} else if(1 < (a - first)) {
last = a, limit = tr_ilg(a - first);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else if(limit == -2) {
/* tandem repeat copy */
a = stack[--ssize].b, b = stack[ssize].c;
if(stack[ssize].d == 0) {
tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
}
STACK_POP5(ISAd, first, last, limit, trlink);
} else {
/* sorted partition */
if(0 <= *first) {
a = first;
do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
first = a;
}
if(first < last) {
/* Un-mark the next group (stored one's-complemented) and decide
   whether it still needs sorting at a deeper offset. */
a = first; do { *a = ~*a; } while(*++a < 0);
next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
/* push */
if(trbudget_check(budget, a - first)) {
if((a - first) <= (last - a)) {
STACK_PUSH5(ISAd, a, last, -3, trlink);
ISAd += incr, last = a, limit = next;
} else {
if(1 < (last - a)) {
STACK_PUSH5(ISAd + incr, first, a, next, trlink);
first = a, limit = -3;
} else {
ISAd += incr, last = a, limit = next;
}
}
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
if(1 < (last - a)) {
first = a, limit = -3;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
continue;
}
if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
tr_insertionsort(ISAd, first, last);
limit = -3;
continue;
}
/* Depth limit hit: fall back to heapsort, then mark equal-rank runs. */
if(limit-- == 0) {
tr_heapsort(ISAd, first, last - first);
for(a = last - 1; first < a; a = b) {
for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
}
limit = -3;
continue;
}
/* choose pivot */
a = tr_pivot(ISAd, first, last);
SWAP(*first, *a);
v = ISAd[*first];
/* partition */
tr_partition(ISAd, first, first + 1, last, &a, &b, v);
if((last - first) != (b - a)) {
next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
/* update ranks */
for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
/* push */
if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
if((a - first) <= (last - b)) {
if((last - b) <= (b - a)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if(1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if((a - first) <= (b - a)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
if((a - first) <= (b - a)) {
if(1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if(1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if((last - b) <= (b - a)) {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
}
} else {
if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
if((a - first) <= (last - b)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if(1 < (last - b)) {
first = b;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if(1 < (a - first)) {
last = a;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
} else {
/* Whole range equal to the pivot: recurse at a deeper offset. */
if(trbudget_check(budget, last - first)) {
limit = tr_ilg(last - first), ISAd += incr;
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
#undef STACK_SIZE
}
/*---------------------------------------------------------------------------*/
/* Tandem repeat sort */
/* Tandem repeat sort: repeatedly sweeps SA, sorting each not-yet-unique
   group with tr_introsort at doubling depths (ISAd advances geometrically)
   until every rank in ISA is unique.  Sorted stretches are run-length
   encoded in SA as a single negative skip value so later sweeps jump over
   them; `-n < *SA` fails once the whole array is one sorted run. */
static
void
trsort(int *ISA, int *SA, int n, int depth) {
int *ISAd;
int *first, *last;
trbudget_t budget;
int t, skip, unsorted;
trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
first = SA;
skip = 0;
unsorted = 0;
do {
if((t = *first) < 0) { first -= t; skip += t; }
else {
/* Close out any accumulated skip run before a real group. */
if(skip != 0) { *(first + skip) = skip; skip = 0; }
last = SA + ISA[t] + 1;
if(1 < (last - first)) {
budget.count = 0;
tr_introsort(ISA, ISAd, SA, first, last, &budget);
/* count != 0 means the budget ran out; retry next round. */
if(budget.count != 0) { unsorted += budget.count; }
else { skip = first - last; }
} else if((last - first) == 1) {
skip = -1;
}
first = last;
}
} while(first < (SA + n));
if(skip != 0) { *(first + skip) = skip; }
if(unsorted == 0) { break; }
}
}
/*---------------------------------------------------------------------------*/
/* Sorts suffixes of type B*. */
/* Sorts all type B* suffixes of T[0..n) and leaves their sorted order in
   SA; fills bucket_A/bucket_B with the character-bucket boundaries used by
   the construct_* routines.  Returns m, the number of B* suffixes.
   Pipeline: count suffix types and bucket sizes -> 2-character bucket sort
   of the B* positions -> sssort on each bucket -> rank computation ->
   trsort to full order -> scatter the B* suffixes into their buckets. */
static
int
sort_typeBstar(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int openMP) {
int *PAb, *ISAb, *buf;
#ifdef LIBBSC_OPENMP
int *curbuf;
int l;
#endif
int i, j, k, t, m, bufsize;
int c0, c1;
#ifdef LIBBSC_OPENMP
int d0, d1;
#endif
(void)openMP;
/* Initialize bucket arrays. */
for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
/* Count the number of occurrences of the first one or two characters of each
type A, B and B* suffix. Moreover, store the beginning position of all
type B* suffixes into the array SA. */
for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
/* type A suffix. */
do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
if(0 <= i) {
/* type B* suffix. */
++BUCKET_BSTAR(c0, c1);
SA[--m] = i;
/* type B suffix. */
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
++BUCKET_B(c0, c1);
}
}
}
m = n - m;
/*
note:
A type B* suffix is lexicographically smaller than a type B suffix that
begins with the same first two characters.
*/
/* Calculate the index of start/end point of each bucket. */
for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
t = i + BUCKET_A(c0);
BUCKET_A(c0) = i + j; /* start point */
i = t + BUCKET_B(c0, c0);
for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
j += BUCKET_BSTAR(c0, c1);
BUCKET_BSTAR(c0, c1) = j; /* end point */
i += BUCKET_B(c0, c1);
}
}
if(0 < m) {
/* Sort the type B* suffixes by their first two characters. */
PAb = SA + n - m; ISAb = SA + m;
for(i = m - 2; 0 <= i; --i) {
t = PAb[i], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = i;
}
t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
/* Sort the type B* substrings using sssort. */
#ifdef LIBBSC_OPENMP
if (openMP)
{
buf = SA + m;
c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
{
bufsize = (n - (2 * m)) / omp_get_num_threads();
curbuf = buf + omp_get_thread_num() * bufsize;
k = 0;
for(;;) {
/* Threads pull the next non-trivial (c0, c1) bucket under a lock. */
#pragma omp critical(sssort_lock)
{
if(0 < (l = j)) {
d0 = c0, d1 = c1;
do {
k = BUCKET_BSTAR(d0, d1);
if(--d1 <= d0) {
d1 = ALPHABET_SIZE - 1;
if(--d0 < 0) { break; }
}
} while(((l - k) <= 1) && (0 < (l = k)));
c0 = d0, c1 = d1, j = k;
}
}
if(l == 0) { break; }
sssort(T, PAb, SA + k, SA + l,
curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
}
}
}
else
{
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
}
#else
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
#endif
/* Compute ranks of type B* substrings. */
for(i = m - 1; 0 <= i; --i) {
if(0 <= SA[i]) {
j = i;
do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
SA[i + 1] = i - j;
if(i <= 0) { break; }
}
j = i;
do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
ISAb[SA[i]] = j;
}
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort(ISAb, SA, m, 1);
/* Set the sorted order of type B* suffixes. */
for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
if(0 <= i) {
t = i;
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
}
}
/* Calculate the index of start/end point of each bucket. */
BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
i = BUCKET_A(c0 + 1) - 1;
for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
t = i - BUCKET_B(c0, c1);
BUCKET_B(c0, c1) = i; /* end point */
/* Move all type B* suffixes to the correct position. */
for(i = t, j = BUCKET_BSTAR(c0, c1);
j <= k;
--i, --k) { SA[i] = SA[k]; }
}
BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
BUCKET_B(c0, c0) = i; /* end point */
}
}
return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
/* Induced sorting: given the sorted type B* suffixes (from sort_typeBstar)
   and the bucket boundaries, first induces the type B suffixes by a
   right-to-left sweep per first character c1, then induces the type A
   suffixes by a single left-to-right sweep.  One's-complemented entries
   temporarily mark positions whose predecessor must not be induced yet. */
static
void
construct_SA(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m) {
int *i, *j, *k;
int s;
int c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
*j = ~s;
c0 = T[--s];
/* Predecessor is type A: flag s so it is skipped by this pass. */
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else {
assert(((s == 0) && (T[s] == c1)) || (s < 0));
*j = ~s;
}
}
}
}
/* Construct the suffix array by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else {
assert(s < 0);
*i = ~s;
}
}
}
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes. */
/* Same induced-sorting sweeps as construct_SA, but each finished position
   is overwritten with the BWT character T[s-1] (stored one's-complemented
   during the right-to-left pass to distinguish it from pending indices).
   Returns the primary index (the position of the original string's
   rotation, tracked via `orig`). */
static
int
construct_BWT(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m) {
int *i, *j, *k, *orig;
int s;
int c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
/* Emit the BWT character for this position (complemented marker). */
c0 = T[--s];
*j = ~((int)c0);
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
*i = c0;
if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
/* s == 0: this row corresponds to the original string. */
orig = i;
}
}
return orig - SA;
}
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes. */
/* Variant of construct_BWT that additionally records, in `indexes`, the
   BWT row of every suffix whose start position is a multiple of
   (mod + 1), where mod is the largest power-of-two minus one not
   exceeding n/8; *num_indexes receives how many such anchors exist.
   These anchors support random-access/parallel inverse BWT in zstd's
   legacy format.  Returns the primary index as construct_BWT does. */
static
int
construct_BWT_indexes(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m,
unsigned char * num_indexes, int * indexes) {
int *i, *j, *k, *orig;
int s;
int c0, c1, c2;
int mod = n / 8;
{
/* Round mod down to 2^k - 1 so `s & mod` tests divisibility by mod+1. */
mod |= mod >> 1; mod |= mod >> 2;
mod |= mod >> 4; mod |= mod >> 8;
mod |= mod >> 16; mod >>= 1;
*num_indexes = (unsigned char)((n - 1) / (mod + 1));
}
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
c0 = T[--s];
*j = ~((int)c0);
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
if (T[n - 2] < c2) {
if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
*k++ = ~((int)T[n - 2]);
}
else {
*k++ = n - 1;
}
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
c0 = T[--s];
*i = c0;
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
if((0 < s) && (T[s - 1] < c0)) {
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
*k++ = ~((int)T[s - 1]);
} else
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
orig = i;
}
}
return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
/* Build the suffix array of T[0..n) into SA.
   Returns 0 on success, -1 on bad arguments, -2 on allocation failure. */
int
divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
  int *bucket_A, *bucket_B;
  int m;
  int err = 0;

  /* Validate arguments and dispatch the trivial lengths directly. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  if(n == 0) { return 0; }
  if(n == 1) { SA[0] = 0; return 0; }
  if(n == 2) {
    m = (T[0] < T[1]);
    SA[m ^ 1] = 0;
    SA[m] = 1;
    return 0;
  }

  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));

  /* Suffixsort. */
  if((bucket_A == NULL) || (bucket_B == NULL)) {
    err = -2;
  } else {
    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  }

  free(bucket_B);
  free(bucket_A);
  return err;
}
/* Burrows-Wheeler transform of T[0..n) into U (may alias workspace rules
   of the caller); A, if non-NULL, supplies the n+1-int workspace,
   otherwise it is allocated here.  When num_indexes/indexes are non-NULL,
   anchor rows are also recorded (see construct_BWT_indexes).
   Returns the primary index + 1, n for trivial inputs, -1 on bad
   arguments, -2 on allocation failure. */
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {
int *B;
int *bucket_A, *bucket_B;
int m, pidx, i;
/* Check arguments. */
if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
/* Burrows-Wheeler Transform. */
if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
if (num_indexes == NULL || indexes == NULL) {
pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
} else {
pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
}
/* Copy to output string. */
U[0] = T[n - 1];
/* B holds BWT characters; the row at pidx itself is skipped (it is the
   sentinel row represented by U[0]). */
for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }
for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }
pidx += 1;
} else {
pidx = -2;
}
free(bucket_B);
free(bucket_A);
if(A == NULL) { free(B); }
return pidx;
}
|
comb.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <omp.h>
void comb_sort(int *, unsigned long, unsigned int);
void imprimir_vetor(int *, unsigned long);
void verify(int *, unsigned long);
/* Benchmark driver: <prog> <element count> <thread count>.
   Runs 10 rounds of comb_sort over fresh random data, verifying and
   timing each round, then prints the mean. */
int main(int argc, char *argv[])
{
    struct timeval timevalA;
    struct timeval timevalB;
    int *vetor = NULL;
    unsigned long tam, i = 0;
    unsigned int numThread;
    char *end;
    long parsed;

    if (argc != 3) {
        printf("%s elementos e numero de threads\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    /* strtol instead of atoi: rejects junk and non-positive values, which
       would otherwise flow into malloc and the unsigned loop bounds. */
    parsed = strtol(argv[1], &end, 10);
    if (*end != '\0' || parsed <= 0) {
        printf("Numero de elementos invalido\n");
        exit(EXIT_FAILURE);
    }
    tam = (unsigned long) parsed;
    parsed = strtol(argv[2], &end, 10);
    if (*end != '\0' || parsed <= 0) {
        printf("Numero de threads invalido\n");
        exit(EXIT_FAILURE);
    }
    numThread = (unsigned int) parsed;
    if (!(vetor = (int *) malloc(sizeof(int) * tam))) {
        printf("Erro ao alocar memória\n");
        exit(EXIT_FAILURE);
    }
    double sum = 0.0;
    srand(time(NULL));
    for (int j = 0; j < 10; j++) {
        for (i = 0; i < tam; i++) {
            *(vetor + i) = random() % 10000;
        }
        gettimeofday(&timevalA, NULL);
        comb_sort(vetor, tam, numThread);
        gettimeofday(&timevalB, NULL);
        verify(vetor, tam);
        double t = timevalB.tv_sec - timevalA.tv_sec + (timevalB.tv_usec - timevalA.tv_usec) / (double) 1000000;
        sum += t;
        printf("%lf\n", t);
    }
    printf("#\t%lf\t%u\t%lu\n", sum / 10.0, numThread, tam);
    free(vetor);
    return EXIT_SUCCESS;
}
/* Parallel comb sort of vetor[0..tam) ascending.
   numThread selects the OpenMP team size for each gap pass. */
void comb_sort(int *vetor, unsigned long tam, unsigned int numThread)
{
    int intervalo, trocado = 1;
    int aux;

    /* Guard: with tam < 2 the original bound `tam - intervalo` underflowed
       (unsigned arithmetic) and the loop read far past the array. */
    if (tam < 2) {
        return;
    }
    intervalo = tam;
    while (intervalo > 1 || trocado == 1)
    {
        /* Shrink the gap by ~1.3; the "rule of 11" skips gaps 9 and 10. */
        intervalo = intervalo * 10 / 13;
        if (intervalo == 9 || intervalo == 10) {
            intervalo = 11;
        }
        if (intervalo < 1) {
            intervalo = 1;
        }
        trocado = 0;
        /* reduction(|:trocado) replaces the original unsynchronized writes
           to the shared flag, which were a data race (undefined behavior).
           NOTE(review): element swaps can still race at chunk boundaries
           when the gap spans threads; the outer loop repeats until a clean
           pass, which masks this in practice — confirm this is acceptable.
           The loop index is unsigned long (was unsigned int) so huge `tam`
           values cannot overflow it. */
#pragma omp parallel for schedule(static) num_threads(numThread) shared(vetor, tam, intervalo) private(aux) reduction(|:trocado)
        for (unsigned long i = 0; i < tam - intervalo; i++)
        {
            if (vetor[i] > vetor[i + intervalo])
            {
                aux = vetor[i];
                vetor[i] = vetor[i + intervalo];
                vetor[i + intervalo] = aux;
                trocado = 1;
            }
        }
    }
}
/* Sanity check: print a complaint at the first inversion if vetor[0..tam)
   is not sorted ascending.
   `i + 1 < tam` replaces `i < tam - 1`: with tam == 0 the unsigned
   subtraction underflowed and the loop read out of bounds; the index is
   now unsigned long, matching `tam` instead of mixing signedness. */
void verify(int *vetor, unsigned long tam){
    for (unsigned long i = 0; i + 1 < tam; i++) {
        if (vetor[i] > vetor[i+1]) {
            printf("TA ERRADO MANO\n");
            break;
        }
    }
}
/* Print the vector's elements tab-separated, followed by a newline. */
void imprimir_vetor(int *vetor, unsigned long tam)
{
    const int *p = vetor;
    const int *fim = vetor + tam;

    while (p < fim) {
        printf("%d\t", *p++);
    }
    printf("\n");
}
|
Example_SIMD.2.c | /*
* @@name: SIMD.2c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
#pragma omp declare simd uniform(fact)
double add1(double a, double b, double fact)
{
   /* Scalar SIMD kernel: sum of both operands plus the uniform addend. */
   return a + b + fact;
}
#pragma omp declare simd uniform(a,b,fact) linear(i:1)
double add2(double *a, double *b, int i, double fact)
{
   /* Indexed SIMD kernel: a[i] + b[i] plus the uniform addend. */
   return a[i] + b[i] + fact;
}
#pragma omp declare simd uniform(fact) linear(a,b:1)
double add3(double *a, double *b, double fact)
{
   /* Pointer SIMD kernel: *a + *b plus the uniform addend. */
   return *a + *b + fact;
}
/* Combine the three declare-simd kernels elementwise over a[0..n)/b[0..n).
   Each iteration: tmp = a[i]+b[i]+1; a[i] = (a[i]+b[i]+1) + tmp (add2 still
   reads the old a[i]); then a[i] = a[i] + b[i] + 1 via add3.  tmp is
   private so each SIMD lane keeps its own intermediate. */
void work( double *a, double *b, int n )
{
int i;
double tmp;
#pragma omp simd private(tmp)
for ( i = 0; i < n; i++ ) {
tmp = add1( a[i], b[i], 1.0);
a[i] = add2( a, b, i, 1.0) + tmp;
a[i] = add3(&a[i], &b[i], 1.0);
}
}
int main(){
   /* Driver: fill a and b so a[i] + b[i] == N, run the SIMD work loop,
      then print every resulting element. */
   const int N=32;
   double a[N], b[N];
   int i;

   for ( i=0; i<N; i++ ) {
      a[i] = i;
      b[i] = N-i;
   }
   work(a, b, N );
   for ( i=0; i<N; i++ ) {
      printf("%d %f\n", i, a[i]);
   }
   return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
   Returns 1 if the difference is negative, 0 otherwise.
   NOTE: *y is used as scratch and may be modified (as in the original). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    long carry;

    /* Borrow whole seconds so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Normalize the other direction when the usec gap exceeds a second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the tiled, OpenMP-parallel 3D 7-point stencil.
 * Allocates a double-buffered grid A[2][Nz][Ny][Nx] (with +2 halo per
 * dimension), runs the PLUTO/CLooG-generated time-tiled sweep TESTS times
 * and reports the per-run and minimum wall-clock time.
 *
 * Command line: Nx Ny Nz Nt (interior sizes and number of time steps).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* Fix: the original read Nx/Ny/Nz/Nt uninitialized when fewer than four
   * arguments were supplied (undefined behavior). Require all four. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
  /* A[2][Nz][Ny][Nx]: two time buffers, allocated row-of-pointers style. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  srand(42);
  /* Fix: initialize from index 0 (the original started at 1, leaving the
   * low halo planes indeterminate) and seed BOTH time buffers, since the
   * stencil reads boundary values of A[1] that it never writes. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code (auto-generated time-tiled loop nest; kept verbatim) */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(32*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(32*t3+Nx+28,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),64*t4+62),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(64*t4,t5+1);
                    ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Freeing the arrays is skipped (it caused performance degradation in
  // benchmarking); the OS reclaims the memory at process exit.
  /* for(i=0; i<Nz; i++){
  for(j=0;j<Ny;j++){
  free(A[0][i][j]);
  free(A[1][i][j]);
  }
  free(A[0][i]);
  free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
SpatialBatchNormalization.c | #include <math.h>
#include "../thnets.h"
static void nnfree_SpatialBatchNormalization(struct module *mod)
{
THFloatTensor_free(mod->SpatialBatchNormalization.running_mean);
THFloatTensor_free(mod->SpatialBatchNormalization.running_var);
THFloatTensor_free(mod->SpatialBatchNormalization.weight);
THFloatTensor_free(mod->SpatialBatchNormalization.bias);
}
int nnload_SpatialBatchNormalization(struct module *mod, struct nnmodule *n)
{
struct table *t = n->table;
mod->type = MT_SpatialBatchNormalization;
mod->updateOutput = nn_SpatialBatchNormalization_updateOutput;
mod->nnfree = nnfree_SpatialBatchNormalization;
struct SpatialBatchNormalization *m = &mod->SpatialBatchNormalization;
m->running_mean = TableGetTensor(t, "running_mean");
m->running_var = TableGetTensor(t, "running_var");
m->weight = TableGetTensor(t, "weight");
m->bias = TableGetTensor(t, "bias");
m->eps = TableGetNumber(t, "eps");
return 0;
}
void pyload_SpatialBatchNormalization(struct pyfunction *f)
{
f->module.updateOutput = nn_SpatialBatchNormalization_updateOutput;
f->module.type = MT_SpatialBatchNormalization;
f->module.nnfree = nnfree_SpatialBatchNormalization;
struct SpatialBatchNormalization *p = &f->module.SpatialBatchNormalization;
p->weight = pygettensor(f->params, "", 0);
p->bias = pygettensor(f->params, "", 1);
p->running_mean = pygettensor(f->params, "running_mean", 0);
p->running_var = pygettensor(f->params, "running_var", 0);
struct pyelement *el;
if( (el = findelement(f->params, "eps", 0)) && el->type == ELTYPE_FLOAT)
p->eps = el->fvalue;
}
#ifdef ONNX
/* Populate a module from an ONNX BatchNormalization node.
 * Node inputs: 1=scale (weight), 2=bias, 3=running mean, 4=running var;
 * the "epsilon" attribute defaults to -1 when absent. */
void onnxload_SpatialBatchNormalization(const void *graph, struct module *m, int nodeidx)
{
	struct SpatialBatchNormalization *p = &m->SpatialBatchNormalization;
	m->updateOutput = nn_SpatialBatchNormalization_updateOutput;
	m->nnfree = nnfree_SpatialBatchNormalization;
	m->type = MT_SpatialBatchNormalization;
	p->weight = onnx_gettensor(graph, nodeidx, 1);
	p->bias = onnx_gettensor(graph, nodeidx, 2);
	p->running_mean = onnx_gettensor(graph, nodeidx, 3);
	p->running_var = onnx_gettensor(graph, nodeidx, 4);
	p->eps = onnx_getfloat(graph, nodeidx, "epsilon", -1);
}
#endif
/* Inference-mode spatial batch normalization:
 *   out = (in - running_mean) / sqrt(running_var + eps) * weight + bias
 * applied independently per feature (channel). The channel dimension is
 * dim 1 for 4-D input, dim 0 otherwise. Returns module->output resized to
 * the input's shape. Each per-feature slice may have 1..3 dimensions
 * (i.e. input of up to 4 dimensions); anything larger raises THError. */
THFloatTensor *nn_SpatialBatchNormalization_updateOutput(struct module *module, THFloatTensor *input)
{
	THFloatTensor *output = module->output;
	THFloatTensor *running_mean = module->SpatialBatchNormalization.running_mean;
	THFloatTensor *running_var = module->SpatialBatchNormalization.running_var;
	THFloatTensor *weight = module->SpatialBatchNormalization.weight;
	THFloatTensor *bias = module->SpatialBatchNormalization.bias;
	long nFeature = input->nDimension == 4 ? input->size[1] : input->size[0];
	double eps = module->SpatialBatchNormalization.eps;
	THFloatTensor_resizeAs(output, input);
	long f;
	/* Features are independent, so the loop parallelizes trivially;
	 * disabled under MEMORYDEBUG builds. */
#ifndef MEMORYDEBUG
#pragma omp parallel for
#endif
	for (f = 0; f < nFeature; ++f)
	{
		/* Views of the f-th channel of input and output (must be freed). */
		THFloatTensor *in = THFloatTensor_newSelect(input, input->nDimension == 4 ? 1 : 0, f);
		THFloatTensor *out = THFloatTensor_newSelect(output, input->nDimension == 4 ? 1 : 0, f);
		float mean, invstd;
		mean = running_mean->storage->data[running_mean->storageOffset + running_mean->stride[0] * f];
		/* eps is added inside the sqrt to guard against division by zero. */
		invstd = 1 / sqrt(running_var->storage->data[running_var->storageOffset + running_var->stride[0] * f] + eps);
		// compute output
		/* Affine parameters default to identity (w=1, b=0) when absent. */
		float w = weight && weight->storage ? weight->storage->data[weight->storageOffset + weight->stride[0] * f] : 1;
		float b = bias && bias->storage ? bias->storage->data[bias->storageOffset + bias->stride[0] * f] : 0;
		float *ind = in->storage->data + in->storageOffset;
		float *outd = out->storage->data + out->storageOffset;
		/* Stride-aware element walks: the slices need not be contiguous. */
		if(in->nDimension == 1)
		{
			long i;
			for(i = 0; i < in->size[0]; i++)
				outd[out->stride[0] * i] = ((ind[in->stride[0] * i] - mean) * invstd) * w + b;
		} else if(in->nDimension == 2)
		{
			long i, j;
			for(i = 0; i < in->size[0]; i++)
				for(j = 0; j < in->size[1]; j++)
					outd[out->stride[0] * i + out->stride[1] * j] =
						((ind[in->stride[0] * i + in->stride[1] * j] - mean) * invstd) * w + b;
		} else if(in->nDimension == 3)
		{
			long i, j, k;
			for(i = 0; i < in->size[0]; i++)
				for(j = 0; j < in->size[1]; j++)
					for(k = 0; k < in->size[2]; k++)
						outd[out->stride[0] * i + out->stride[1] * j + out->stride[2] * k] =
							((ind[in->stride[0] * i + in->stride[1] * j + in->stride[2] * k] - mean) * invstd) * w + b;
		} else THError("SpatialBatchNormalization not supported for input dimensions higher of 4 (%d)\n", in->nDimension);
		THFloatTensor_free(out);
		THFloatTensor_free(in);
	}
	return output;
}
|
test1.c | /*
program main
implicit none
integer :: a = 10
!$omp target data map(tofrom: a)
!$omp target map(tofrom: a)
a = a + 1
!$omp end target
!$omp target update from(a)
!$omp end target data
a = -15
!$omp target update from(a)
print *, a !<-- expect -15; actual 11
end program main
2) segfault
$>cat t.f
program main
implicit none
integer :: a = 10
!$omp target map(tofrom: a)
a = a + 1
!$omp end target
!$omp target update from(a)
a = -15
!$omp target update from(a)
print *, a !<-- expect -15; actual segfault
end program main
*/
#include <stdio.h>
#pragma omp requires unified_shared_memory
#define TEST1 1
#define TEST2 1
/* Reproducer for the OpenMP `target update from` behavior described in the
 * Fortran transcript above. Per the transcript, after the host sets a = -15
 * the final `target update from(a)` should leave a == -15 (test 1: `a` is no
 * longer freshly written on the device; test 2: `a` is not mapped at all);
 * buggy runtimes instead printed 11 (test 1) or segfaulted (test 2).
 * NOTE(review): the file declares `#pragma omp requires unified_shared_memory`
 * above, which may change mapping semantics — confirm against the target
 * runtime. */
int main()
{
	int a;
	// test 1
#if TEST1
	a = 10;
	/* Map `a` for the whole structured region. */
#pragma omp target data map(tofrom: a)
	{
#pragma omp target map(tofrom: a)
		{
			a = a + 1; /* incremented on the device */
		}
		//printf("test 1: a is %d (after target)\n", a);
		/* Inside the data region: copies the device value back to host. */
#pragma omp target update from(a)
	}
	//printf("test 1: a is %d (after target data)\n", a);
	a = -15;
	/* Outside the data region: `a` should no longer be mapped. */
#pragma omp target update from(a)
	printf("test 1: a is %d\n", a);
#endif
#if TEST2
	// test 2
	a = 10;
	/* Mapping ends with the target construct itself here. */
#pragma omp target map(tofrom: a)
	{
		a = a + 1;
	}
#pragma omp target update from(a)
	a = -15;
#pragma omp target update from(a)
#endif
	printf("test 2: a is %d\n", a);
	/* NOTE(review): returns 1 (non-zero) — presumably deliberate for this
	 * reproducer; confirm the test harness does not treat it as failure. */
	return 1;
}
|
mttkrp_omp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include "hicoo.h"
#define CHUNKSIZE 1
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
int spt_OmpMTTKRPHiCOOKernels(
sptSparseTensorHiCOO const * const hitsr,
sptMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int spt_OmpMTTKRPHiCOOKernels_MatrixTiling(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptRankMatrix * copy_mats[], // temporary matrices for reduction
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptRankMatrix * copy_mats[], // temporary matrices for reduction
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptRankMatrix * copy_mats[], // temporary matrices for reduction
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(
sptSparseTensorHiCOO const * const hitsr,
sptRankMatrix * mats[], // mats[nmodes] as temporary space.
sptRankMatrix * copy_mats[], // temporary matrices for reduction
sptIndex const mats_order[], // Correspond to the mode order of X.
sptIndex const mode,
const int tk);
/*************************************************
* PUBLIC FUNCTIONS
*************************************************/
/**
* OpenMP-parallel matricized sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. OpenMP atomic is used.
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R
* @param[in] hitsr the HiCOO sparse tensor input
* @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary
* @param[in] mats_order the order of the Khatri-Rao products
* @param[in] mode the mode on which the MTTKRP is performed
* @param[in] nt the number of threads
*
* This function supports arbitrary-order sparse tensors with Khatri-Rao
* products of dense factor matrices; the output is the updated dense matrix for the "mode".
*/
/* Thin wrapper: dispatch to the generic atomic OpenMP HiCOO MTTKRP kernel.
 * Always returns 0; kernel failure trips sptAssert instead.
 * NOTE(review): the kernel call is nested inside sptAssert — if sptAssert
 * can be compiled out (assert-style), the computation would vanish; confirm
 * sptAssert always evaluates its argument. */
int sptOmpMTTKRPHiCOO(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int nt)
{
    sptAssert(spt_OmpMTTKRPHiCOOKernels(hitsr, mats, mats_order, mode, nt) == 0);
    return 0;
}
/**
* OpenMP-parallel matricized sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. OpenMP atomic is used. The tensor rank and columns of dense matrices are stored in less bits, in sptElementIndex type.
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R
* @param[in] hitsr the HiCOO sparse tensor input
* @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary
* @param[in] mats_order the order of the Khatri-Rao products
* @param[in] mode the mode on which the MTTKRP is performed
* @param[in] nt the number of threads
*
* This function supports arbitrary-order sparse tensors with Khatri-Rao
* products of dense factor matrices; the output is the updated dense matrix for the "mode".
*/
/* Thin wrapper: dispatch to the matrix-tiled atomic OpenMP HiCOO MTTKRP
 * kernel (ranks stored as sptElementIndex). Always returns 0; kernel
 * failure trips sptAssert instead.
 * NOTE(review): kernel call nested inside sptAssert — confirm sptAssert
 * always evaluates its argument (see sptOmpMTTKRPHiCOO). */
int sptOmpMTTKRPHiCOO_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int nt)
{
    sptAssert(spt_OmpMTTKRPHiCOOKernels_MatrixTiling(hitsr, mats, mats_order, mode, nt) == 0);
    return 0;
}
/**
* OpenMP-parallel matricized sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. The tensor rank and columns of dense matrices are stored in less bits, in sptElementIndex type. We independently parallelize it by rows of the superblock scheduler.
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R
* @param[in] hitsr the HiCOO sparse tensor input
* @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary
* @param[in] mats_order the order of the Khatri-Rao products
* @param[in] mode the mode on which the MTTKRP is performed
* @param[in] nt the number of threads
*
* This function supports arbitrary-order sparse tensors with Khatri-Rao
* products of dense factor matrices; the output is the updated dense matrix for the "mode".
*/
/* Dispatch to the superblock-scheduled matrix-tiled MTTKRP kernel,
 * choosing the plain or load-balanced variant via `balanced`.
 * NOTE(review): when tk <= 1, or balanced is neither 0 nor 1, this silently
 * performs NO computation yet still returns 0 — callers are presumably
 * expected to run a sequential path for tk == 1; confirm. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    int balanced)
{
    if(tk > 1) {
        if (balanced == 0)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        else if (balanced == 1)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0);
    }
    return 0;
}
/**
* OpenMP-parallel matricized sparse tensor in HiCOO format times a sequence of dense matrix Khatri-Rao products (MTTKRP) on a specified mode. The tensor rank and columns of dense matrices are stored in less bits, in sptElementIndex type. We parallelize it by columns of the superblock scheduler, then use a parallel reduction.
This is a privatization method.
* @param[out] mats[nmodes] the result of MTTKRP, a dense matrix, with size * ndims[mode] * R
* @param[in] hitsr the HiCOO sparse tensor input
* @param[in] mats (N+1) dense matrices, with mats[nmodes] as temporary
* @param[in] mats_order the order of the Khatri-Rao products
* @param[in] mode the mode on which the MTTKRP is performed
* @param[in] nt the number of threads
*
* This function supports arbitrary-order sparse tensors with Khatri-Rao
* products of dense factor matrices; the output is the updated dense matrix for the "mode".
*/
/* Dispatch to the scheduled matrix-tiled MTTKRP kernel with per-thread
 * privatization (copy_mats) and a parallel reduction; `balanced` selects
 * the load-balanced variant.
 * NOTE(review): when tk <= 1, or balanced is neither 0 nor 1, this silently
 * performs NO computation yet still returns 0 (same caveat as the
 * non-Reduce wrapper); confirm callers handle tk == 1 elsewhere. */
int sptOmpMTTKRPHiCOO_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[], // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[], // temporary matrices for reduction
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk,
    int balanced)
{
    if(tk > 1) {
        if(balanced == 0)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        else if (balanced == 1)
            sptAssert(sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
    }
    return 0;
}
/*************************************************
* PRIVATE FUNCTIONS
*************************************************/
/* 3-D specialization of the atomic OpenMP HiCOO MTTKRP kernel.
 * Accumulates mats[nmodes] += MTTKRP(hitsr, mats, mode) with one
 * `omp atomic` per output element; kernels are parallelized across threads.
 * Fix: block (b) and nonzero (z) loop counters now use sptNnzIndex, the
 * element type of bptr/kptr, matching the general-order kernel below —
 * sptIndex could truncate on tensors with very large nonzero counts. */
int spt_OmpMTTKRPHiCOOKernels_3D(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;
    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }
    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* For 3-D tensors the two non-mode factor matrices are fixed up front. */
    sptIndex times_mat_index_1 = mats_order[1];
    sptMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptMatrix * restrict times_mat_2 = mats[times_mat_index_2];
    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            sptBlockIndex block_coord_mode = hitsr->binds[mode].data[b];
            sptBlockIndex block_coord_1 = hitsr->binds[times_mat_index_1].data[b];
            sptBlockIndex block_coord_2 = hitsr->binds[times_mat_index_2].data[b];
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Global indices: block coordinate shifted by sb_bits plus
                 * the in-block element index. */
                sptIndex mode_i = (block_coord_mode << hitsr->sb_bits) + hitsr->einds[mode].data[z];
                sptIndex tmp_i_1 = (block_coord_1 << hitsr->sb_bits) + hitsr->einds[times_mat_index_1].data[z];
                sptIndex tmp_i_2 = (block_coord_2 << hitsr->sb_bits) + hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels
    return 0;
}
/* Generic-order atomic OpenMP HiCOO MTTKRP kernel.
 * Delegates 3-D tensors to the specialized kernel; otherwise accumulates
 * the Khatri-Rao product into mats[nmodes] row-by-row, using `omp atomic`
 * for the scatter. Each thread allocates its own coordinate buffers and a
 * length-R scratch vector.
 * NOTE(review): the per-thread malloc results are not checked — presumably
 * acceptable under this library's OOM policy; confirm. */
int spt_OmpMTTKRPHiCOOKernels(
    sptSparseTensorHiCOO const * const hitsr,
    sptMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    /* Fast path: fixed-arity 3-D kernel avoids the coordinate buffers. */
    if(nmodes == 3) {
        sptAssert(spt_OmpMTTKRPHiCOOKernels_3D(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const vals = hitsr->values.data;
    sptIndex const stride = mats[0]->stride;
    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }
    sptIndex const tmpI = mats[mode]->nrows;
    sptIndex const R = mats[mode]->ncols;
    sptMatrix * const M = mats[nmodes];
    sptValue * const mvals = M->values;
    /* Output accumulator starts at zero. */
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    // omp_lock_t lock;
    // omp_init_lock(&lock);
    /* Loop kernels */
    #pragma omp parallel for num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        sptIndex * block_coord = (sptIndex*)malloc(nmodes * sizeof(*block_coord));
        sptIndex * ele_coord = (sptIndex*)malloc(nmodes * sizeof(*ele_coord));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Block indices */
            for(sptIndex m=0; m<nmodes; ++m)
                block_coord[m] = hitsr->binds[m].data[b];
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Element indices: block coordinate shifted by sb_bits plus
                 * the in-block element index. */
                for(sptIndex m=0; m<nmodes; ++m)
                    ele_coord[m] = (block_coord[m] << hitsr->sb_bits) + hitsr->einds[m].data[z];
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptMatrix * times_mat = mats[times_mat_index];
                sptIndex tmp_i = ele_coord[times_mat_index];
                sptValue const entry = vals[z];
                for(sptIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    times_mat = mats[times_mat_index];
                    tmp_i = ele_coord[times_mat_index];
                    for(sptIndex r=0; r<R; ++r) {
                        scratch.data[r] *= times_mat->values[tmp_i * stride + r];
                    }
                }
                sptIndex const mode_i = ele_coord[mode];
                // omp_set_lock(&lock);
                /* Scatter into the shared output row; atomic per element. */
                for(sptIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    mvals[mode_i * stride + r] += scratch.data[r];
                }
                // omp_unset_lock(&lock);
            } // End loop entries
        } // End loop blocks
        /* Free thread-private space */
        free(block_coord);
        free(ele_coord);
        sptFreeValueVector(&scratch);
    } // End loop kernels
    // omp_destroy_lock(&lock);
    return 0;
}
/* 3-D matrix-tiled atomic OpenMP HiCOO MTTKRP kernel (ranks stored as
 * sptElementIndex). Factor-matrix rows are addressed relative to a
 * per-block base pointer so inner loops use only short element indices.
 * Fix: block (b) and nonzero (z) loop counters now use sptNnzIndex, the
 * element type of bptr/kptr, matching the general-order kernel —
 * sptIndex could truncate on tensors with very large nonzero counts. */
int spt_OmpMTTKRPHiCOOKernels_3D_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;
    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }
    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* For 3-D tensors the two non-mode factor matrices are fixed up front. */
    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];
    /* Loop kernels */
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Per-block base pointers: block coordinate shifted by sb_bits
             * selects the first row of each matrix tile. */
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            } // End loop entries
        } // End loop blocks
    } // End loop kernels
    return 0;
}
/* Generic-order matrix-tiled atomic OpenMP HiCOO MTTKRP kernel (ranks
 * stored as sptElementIndex). Delegates 3-D tensors to the specialized
 * kernel; otherwise each thread keeps per-block base pointers for every
 * factor matrix plus a length-R scratch vector, and scatters with atomics.
 * Fix: nonzero loop counter z now uses sptNnzIndex, the element type of
 * bptr (the block loop already did) — sptIndex could truncate on tensors
 * with very large nonzero counts. */
int spt_OmpMTTKRPHiCOOKernels_MatrixTiling(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[], // mats[nmodes] as temporary space.
    sptIndex const mats_order[], // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    /* Fast path: fixed-arity 3-D kernel avoids the pointer array. */
    if(nmodes == 3) {
        sptAssert(spt_OmpMTTKRPHiCOOKernels_3D_MatrixTiling(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;
    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP  HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP  HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }
    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Loop kernels */
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
    for(sptIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);
        sptNnzIndex kptr_begin = hitsr->kptr.data[k];
        sptNnzIndex kptr_end = hitsr->kptr.data[k+1];
        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices: per-block base pointer for every mode. */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
            /* Loop entries in a block */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }
                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                /* Scatter into the shared output row; atomic per element. */
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            } // End loop entries
        } // End loop blocks
        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    } // End loop kernels
    return 0;
}
/**
 * MTTKRP for a 3rd-order HiCOO tensor with matrix tiling, parallelized over
 * kernels using the per-mode schedule hitsr->kschr. Kernels executed within
 * one parallel iteration write disjoint output tiles, so no atomics are used
 * (presumably guaranteed by the kschr construction — confirm with scheduler).
 *
 * @param hitsr       HiCOO sparse tensor; must have exactly 3 modes.
 * @param mats        factor matrices; mats[nmodes] receives the result.
 * @param mats_order  mode order of X; mats_order[1..2] pick the two factors.
 * @param mode        the mode being updated.
 * @param tk          number of OpenMP threads.
 * @return 0 on success; shape mismatches are reported via spt_CheckError.
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Kernel size along the mode: sk = 2^sk_bits. An integer shift is exact,
     * unlike the old pow() round-trip through double. */
    sptIndex sk = (sptIndex)1 << hitsr->sk_bits;
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
    for(sptIndex i = 0; i < hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex k = 0; k < num_kernel_dim; ++k) {
#ifdef NNZ_STATISTICS
            int tid = omp_get_thread_num();
#endif
            /* This kernel strip has no work in iteration i. */
            if(i >= kschr_mode[k].len) {
                continue;
            }
            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Loop blocks in a kernel. b and z index nonzero-sized ranges,
             * so they must be sptNnzIndex: the previous sptIndex counters
             * could truncate kptr/bptr values on very large tensors. */
            for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
                /* Locate the output tile and the two factor tiles. */
                sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;
                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block */
                for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                    sptElementIndex mode_i = hitsr->einds[mode].data[z];
                    sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                    sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                    sptValue entry = vals[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                            blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                            blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                    }
                }   // End loop entries
            }   // End loop blocks
        }   // End loop kernels
    }   // End loop iterations

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/**
 * MTTKRP for a general HiCOO tensor with matrix tiling, parallelized over
 * kernels via the per-mode schedule hitsr->kschr. Dispatches to the
 * specialized 3D kernel when nmodes == 3. Each thread keeps private
 * blocked-matrix pointers and a scratch row so no atomics are needed in
 * the accumulation (the schedule assigns disjoint output tiles per
 * iteration — presumably guaranteed by kschr construction).
 *
 * @param hitsr       HiCOO sparse tensor.
 * @param mats        factor matrices; mats[nmodes] receives the result.
 * @param mats_order  mode order of X; mats_order[1..nmodes-1] pick factors.
 * @param mode        the mode being updated.
 * @param tk          number of OpenMP threads.
 * @return 0 on success; shape mismatches are reported via spt_CheckError.
 */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    /* Kernel size along the mode: sk = 2^sk_bits (exact integer shift). */
    sptIndex sk = (sptIndex)1 << hitsr->sk_bits;
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
    for(sptIndex i = 0; i < hitsr->nkiters[mode]; ++i) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex k = 0; k < num_kernel_dim; ++k) {
#ifdef NNZ_STATISTICS
            int tid = omp_get_thread_num();
#endif
            if(i >= kschr_mode[k].len) continue;

            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Allocate thread-private data */
            sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            sptValueVector scratch; // Temporary array holding one output row.
            sptNewValueVector(&scratch, R, R);

            /* Loop blocks in a kernel */
            for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
                /* Blocked matrices: tile base pointers for every mode. */
                for(sptIndex m = 0; m < nmodes; ++m)
                    blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block. z indexes nonzeros, so it must be
                 * sptNnzIndex (the previous sptIndex counter could truncate
                 * bptr values on tensors with very many nonzeros). */
                for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                    /* Multiply the 1st matrix */
                    sptIndex times_mat_index = mats_order[1];
                    sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                    sptValue const entry = vals[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                    /* Multiply the rest matrices */
                    for(sptIndex m = 2; m < nmodes; ++m) {
                        times_mat_index = mats_order[m];
                        tmp_i = hitsr->einds[times_mat_index].data[z];
                        #pragma omp simd
                        for(sptElementIndex r = 0; r < R; ++r) {
                            scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                    }
                    /* Accumulate into the output tile (no atomics needed). */
                    sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                    }
                }   // End loop entries
            }   // End loop blocks

            /* Free thread-private space */
            free(blocked_times_mat);
            sptFreeValueVector(&scratch);
        }   // End loop kernels
    }   // End loop iterations

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/**
 * MTTKRP for a 3rd-order HiCOO tensor with matrix tiling, using the
 * load-balanced kernel schedule (hitsr->kschr_balanced / _pos): partitions
 * of non-conflicting kernels run lock-free, then the leftover kernels in
 * hitsr->kschr_rest are processed with atomic accumulation.
 *
 * @param hitsr       HiCOO sparse tensor; must have exactly 3 modes.
 * @param mats        factor matrices; mats[nmodes] receives the result.
 * @param mats_order  mode order of X; mats_order[1..2] pick the two factors.
 * @param mode        the mode being updated.
 * @param tk          number of OpenMP threads.
 * @return 0 on success; shape mismatches are reported via spt_CheckError.
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Kernel size along the mode: sk = 2^sk_bits (exact integer shift). */
    sptIndex sk = (sptIndex)1 << hitsr->sk_bits;
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop partitions */
    for(sptIndex p = 0; p < npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex i = 0; i < num_kernel_dim; ++i) {
            /* Written as len <= p+1 rather than p >= len-1: len is unsigned,
             * so len-1 would wrap to a huge value when len == 0 and the
             * guard would never fire. */
            if(kschr_balanced_pos_mode[i].len <= p + 1) continue;
#ifdef NNZ_STATISTICS
            int tid = omp_get_thread_num();
#endif
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(sptIndex j = j_begin; j < j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Loop blocks in a kernel. b and z index nonzero-sized
                 * ranges, so they must be sptNnzIndex (sptIndex counters
                 * could truncate kptr/bptr values on large tensors). */
                for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
                    sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;
                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                    /* Loop entries in a block */
                    for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                        sptElementIndex mode_i = hitsr->einds[mode].data[z];
                        sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                        sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                        sptValue entry = vals[z];
                        #pragma omp simd
                        for(sptElementIndex r = 0; r < R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                                blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                                blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                        }
                    }   // End loop entries
                }   // End loop blocks
            }   // End loop inside a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process the unscheduled leftover kernels using atomics. */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
#ifdef NNZ_STATISTICS
        int tid = omp_get_thread_num();
#endif
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Loop blocks in a kernel */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;
            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r = 0; r < R; ++r) {
                    /* Rest kernels may collide across threads: atomic add. */
                    #pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/**
 * MTTKRP for a general HiCOO tensor with matrix tiling and the
 * load-balanced kernel schedule. Dispatches to the specialized 3D kernel
 * when nmodes == 3. Scheduled partitions run lock-free with a per-thread
 * scratch row; the leftover kernels in hitsr->kschr_rest are processed
 * with atomic accumulation.
 *
 * @param hitsr       HiCOO sparse tensor.
 * @param mats        factor matrices; mats[nmodes] receives the result.
 * @param mats_order  mode order of X; mats_order[1..nmodes-1] pick factors.
 * @param mode        the mode being updated.
 * @param tk          number of OpenMP threads.
 * @return 0 on success; shape mismatches are reported via spt_CheckError.
 */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Balanced(hitsr, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));

    /* Kernel size along the mode: sk = 2^sk_bits (exact integer shift). */
    sptIndex sk = (sptIndex)1 << hitsr->sk_bits;
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop partitions */
    for(sptIndex p = 0; p < npars; ++p) {
        /* Loop kernels */
#ifdef NNZ_STATISTICS
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
        #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
        for(sptIndex i = 0; i < num_kernel_dim; ++i) {
            /* Written as len <= p+1 rather than p >= len-1: len is unsigned,
             * so len-1 would wrap when len == 0 and never skip. */
            if(kschr_balanced_pos_mode[i].len <= p + 1) continue;
#ifdef NNZ_STATISTICS
            int tid = omp_get_thread_num();
#endif
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            /* Loop inside a partition */
            for(sptIndex j = j_begin; j < j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Allocate thread-private data */
                sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
                sptValueVector scratch; // Temporary array holding one output row.
                sptNewValueVector(&scratch, R, R);

                /* Loop blocks in a kernel */
                for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
                    /* Blocked matrices: tile base pointers for every mode. */
                    for(sptIndex m = 0; m < nmodes; ++m)
                        blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                    /* Loop entries in a block. z indexes nonzeros, so it must
                     * be sptNnzIndex to avoid truncating bptr values. */
                    for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                        /* Multiply the 1st matrix */
                        sptIndex times_mat_index = mats_order[1];
                        sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                        sptValue const entry = vals[z];
                        #pragma omp simd
                        for(sptElementIndex r = 0; r < R; ++r) {
                            scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                        /* Multiply the rest matrices */
                        for(sptIndex m = 2; m < nmodes; ++m) {
                            times_mat_index = mats_order[m];
                            tmp_i = hitsr->einds[times_mat_index].data[z];
                            #pragma omp simd
                            for(sptElementIndex r = 0; r < R; ++r) {
                                scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                            }
                        }
                        /* Scheduled kernels write disjoint tiles: no atomics. */
                        sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                        #pragma omp simd
                        for(sptElementIndex r = 0; r < R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                        }
                    }   // End loop entries
                }   // End loop blocks

                /* Free thread-private space */
                free(blocked_times_mat);
                sptFreeValueVector(&scratch);
            }   // End loop inside a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process the unscheduled leftover kernels using atomics. */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
#ifdef NNZ_STATISTICS
        int tid = omp_get_thread_num();
#endif
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array holding one output row.
        sptNewValueVector(&scratch, R, R);

        /* Loop blocks in a kernel */
        for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
            /* Blocked matrices */
            for(sptIndex m = 0; m < nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_mvals = mvals + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
            /* Loop entries in a block */
            for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r = 0; r < R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m = 2; m < nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }
                /* Rest kernels may collide across threads: atomic add. */
                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r = 0; r < R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    }   // End loop kernels

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/**
 * MTTKRP for a 3rd-order HiCOO tensor with matrix tiling, parallelized over
 * scheduled iterations with per-thread output copies: each thread
 * accumulates into copy_mats[tid], then a parallel reduction sums the
 * copies into mats[nmodes]. No atomics are needed.
 *
 * @param hitsr       HiCOO sparse tensor; must have exactly 3 modes.
 * @param mats        factor matrices; mats[nmodes] receives the result.
 * @param copy_mats   tk per-thread output buffers, each ndims[mode] x stride.
 * @param mats_order  mode order of X; mats_order[1..2] pick the two factors.
 * @param mode        the mode being updated.
 * @param tk          number of OpenMP threads (and entries of copy_mats).
 * @return 0 on success; shape mismatches are reported via spt_CheckError.
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes == 3);
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));
    /* Zero every per-thread copy before accumulation. */
    for(int t = 0; t < tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode] * stride * sizeof(*(copy_mats[t]->values)));
    }

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Kernel size along the mode: sk = 2^sk_bits (exact integer shift). */
    sptIndex sk = (sptIndex)1 << hitsr->sk_bits;
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex i = 0; i < hitsr->nkiters[mode]; ++i) {
        int tid = omp_get_thread_num();
        /* Loop kernels */
        for(sptIndex k = 0; k < num_kernel_dim; ++k) {
            if(i >= kschr_mode[k].len) {
                continue;
            }
            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Loop blocks in a kernel. b and z index nonzero-sized ranges,
             * so they must be sptNnzIndex (sptIndex counters could truncate
             * kptr/bptr values on large tensors). */
            for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
                /* use copy_mats to store each thread's output */
                sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;
                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block */
                for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                    sptElementIndex mode_i = hitsr->einds[mode].data[z];
                    sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                    sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                    sptValue entry = vals[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                            blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                            blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                    }
                }   // End loop entries
            }   // End loop blocks
        }   // End loop kernels
    }   // End loop iterations

    /* Reduction: sum the per-thread copies into the output matrix. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i = 0; i < ndims[mode]; ++i) {
        for(int t = 0; t < tk; ++t) {
            #pragma omp simd
            for(sptElementIndex r = 0; r < R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/**
 * MTTKRP for a general HiCOO tensor with matrix tiling and per-thread
 * output copies. Dispatches to the specialized 3D kernel when nmodes == 3.
 * Each thread accumulates into copy_mats[tid]; a parallel reduction then
 * sums the copies into mats[nmodes], avoiding atomics entirely.
 *
 * @param hitsr       HiCOO sparse tensor.
 * @param mats        factor matrices; mats[nmodes] receives the result.
 * @param copy_mats   tk per-thread output buffers, each ndims[mode] x stride.
 * @param mats_order  mode order of X; mats_order[1..nmodes-1] pick factors.
 * @param mode        the mode being updated.
 * @param tk          number of OpenMP threads (and entries of copy_mats).
 * @return 0 on success; shape mismatches are reported via spt_CheckError.
 */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],     // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i = 0; i < nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI * stride * sizeof(*mvals));
    /* Zero every per-thread copy before accumulation. */
    for(int t = 0; t < tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode] * stride * sizeof(*(copy_mats[t]->values)));
    }

    /* Kernel size along the mode: sk = 2^sk_bits (exact integer shift). */
    sptIndex sk = (sptIndex)1 << hitsr->sk_bits;
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_mode = hitsr->kschr[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex i = 0; i < hitsr->nkiters[mode]; ++i) {
        int tid = omp_get_thread_num();
        /* Loop kernels */
        for(sptIndex k = 0; k < num_kernel_dim; ++k) {
            if(i >= kschr_mode[k].len) continue;

            sptIndex kptr_loc = kschr_mode[k].data[i];
            sptNnzIndex kptr_begin = hitsr->kptr.data[kptr_loc];
            sptNnzIndex kptr_end = hitsr->kptr.data[kptr_loc+1];

            /* Allocate thread-private data */
            sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
            sptValueVector scratch; // Temporary array holding one output row.
            sptNewValueVector(&scratch, R, R);

            /* Loop blocks in a kernel */
            for(sptNnzIndex b = kptr_begin; b < kptr_end; ++b) {
                /* Blocked matrices: tile base pointers for every mode. */
                for(sptIndex m = 0; m < nmodes; ++m)
                    blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                /* Each thread writes its own copy of the output matrix. */
                sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif
                /* Loop entries in a block. z indexes nonzeros, so it must be
                 * sptNnzIndex to avoid truncating bptr values. */
                for(sptNnzIndex z = bptr_begin; z < bptr_end; ++z) {
                    /* Multiply the 1st matrix */
                    sptIndex times_mat_index = mats_order[1];
                    sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                    sptValue const entry = vals[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                    /* Multiply the rest matrices */
                    for(sptIndex m = 2; m < nmodes; ++m) {
                        times_mat_index = mats_order[m];
                        tmp_i = hitsr->einds[times_mat_index].data[z];
                        #pragma omp simd
                        for(sptElementIndex r = 0; r < R; ++r) {
                            scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                    }
                    sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                    #pragma omp simd
                    for(sptElementIndex r = 0; r < R; ++r) {
                        blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                    }
                }   // End loop entries
            }   // End loop blocks

            /* Free thread-private space */
            free(blocked_times_mat);
            sptFreeValueVector(&scratch);
        }   // End loop kernels
    }   // End loop iterations

    /* Reduction: sum the per-thread copies into the output matrix. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i = 0; i < ndims[mode]; ++i) {
        for(int t = 0; t < tk; ++t) {
            #pragma omp simd
            for(sptElementIndex r = 0; r < R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/*
 * MTTKRP (Matricized Tensor Times Khatri-Rao Product) for a 3rd-order HiCOO
 * sparse tensor, using matrix tiling, the balanced kernel schedule
 * (kschr_balanced) with privatized per-thread outputs (copy_mats), and a
 * leftover pass (kschr_rest) that uses atomic updates.  The per-thread
 * copies are summed into mats[nmodes] in a final reduction.
 *
 * Parameters:
 *   hitsr      input tensor in HiCOO format (must be 3rd order)
 *   mats       factor matrices; mats[nmodes] is the output/scratch matrix
 *   copy_mats  tk per-thread output buffers used to avoid write conflicts
 *   mats_order traversal order of the modes (mats_order[0] == mode)
 *   mode       the mode being updated
 *   tk         number of OpenMP threads
 *
 * Returns 0 on success; raises a shape-mismatch error via spt_CheckError.
 *
 * Fixes vs. original: block/entry loop counters widened from sptIndex to
 * sptNnzIndex (kptr/bptr store sptNnzIndex offsets, so a narrower counter
 * could truncate on very large tensors), and the unsigned underflow of
 * (len - 1) for empty kschr_balanced_pos strips is guarded.
 */
int sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],         // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;
    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    sptAssert(nmodes ==3);
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Zero the per-thread accumulators. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    sptIndex times_mat_index_1 = mats_order[1];
    sptRankMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    sptIndex times_mat_index_2 = mats_order[2];
    sptRankMatrix * restrict times_mat_2 = mats[times_mat_index_2];

    /* Superblock size along the mode and the number of kernel strips. */
    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex p=0; p<npars; ++p) {
        int tid = omp_get_thread_num();

        /* Loop kernels */
        for(sptIndex i=0; i<num_kernel_dim; ++i) {
            /* FIX: (len - 1) underflows for an empty unsigned-length strip;
             * test p + 1 >= len instead. */
            if(kschr_balanced_pos_mode[i].len == 0 ||
               p + 1 >= kschr_balanced_pos_mode[i].len) continue;
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            for(sptIndex j=j_begin; j<j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Loop blocks in a kernel.
                 * FIX: counter widened to sptNnzIndex to match kptr. */
                for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                    /* use copy_mats to store each thread's output */
                    sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                    /* Loop entries in a block.
                     * FIX: counter widened to sptNnzIndex to match bptr. */
                    for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                        sptElementIndex mode_i = hitsr->einds[mode].data[z];
                        sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                        sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                        sptValue entry = vals[z];
                        #pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += entry *
                                blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                                blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                        }
                    }   // End loop entries
                }   // End loop blocks
            }   // End kernels in a partition
        }   // End loop kernels
    }   // End loop partitions

    /* Process the leftover kernels (kschr_rest); different threads may touch
     * the same output rows here, hence the atomic updates. */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Loop blocks in a kernel (FIX: sptNnzIndex counter). */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Use copy_mats to reduce atomics */
            sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_1 = times_mat_1->values + (hitsr->binds[times_mat_index_1].data[b] << hitsr->sb_bits) * stride;
            sptValue * blocked_times_mat_2 = times_mat_2->values + (hitsr->binds[times_mat_index_2].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

            /* Loop entries in a block (FIX: sptNnzIndex counter). */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                sptElementIndex mode_i = hitsr->einds[mode].data[z];
                sptElementIndex tmp_i_1 = hitsr->einds[times_mat_index_1].data[z];
                sptElementIndex tmp_i_2 = hitsr->einds[times_mat_index_2].data[z];
                sptValue entry = vals[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += entry *
                        blocked_times_mat_1[(sptBlockMatrixIndex)tmp_i_1 * stride + r] *
                        blocked_times_mat_2[(sptBlockMatrixIndex)tmp_i_2 * stride + r];
                }
            }   // End loop entries
        }   // End loop blocks
    }   // End loop kernels

    /* Reduction: sum the per-thread copies into the real output. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
            #pragma omp simd
            for(sptElementIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
/*
 * MTTKRP for an arbitrary-order HiCOO sparse tensor with matrix tiling, the
 * balanced kernel schedule (kschr_balanced) with privatized per-thread
 * outputs (copy_mats), and a leftover pass (kschr_rest) using atomic
 * updates.  3rd-order tensors are forwarded to the specialized kernel.
 *
 * Parameters:
 *   hitsr      input tensor in HiCOO format
 *   mats       factor matrices; mats[nmodes] is the output/scratch matrix
 *   copy_mats  tk per-thread output buffers summed in the final reduction
 *   mats_order traversal order of the modes (mats_order[0] == mode)
 *   mode       the mode being updated
 *   tk         number of OpenMP threads
 *
 * Returns 0 on success; raises a shape-mismatch error via spt_CheckError.
 *
 * Fixes vs. original: entry loop counters widened from sptIndex to
 * sptNnzIndex (bptr stores sptNnzIndex offsets, so a narrower counter could
 * truncate on very large tensors), and the unsigned underflow of (len - 1)
 * for empty kschr_balanced_pos strips is guarded.
 */
int sptOmpMTTKRPHiCOOKernels_MatrixTiling_Scheduled_Reduce_Balanced(
    sptSparseTensorHiCOO const * const hitsr,
    sptRankMatrix * mats[],         // mats[nmodes] as temporary space.
    sptRankMatrix * copy_mats[],    // temporary matrices for reduction
    sptIndex const mats_order[],    // Correspond to the mode order of X.
    sptIndex const mode,
    const int tk)
{
    sptIndex const nmodes = hitsr->nmodes;

    /* Specialized, fully unrolled kernel for 3rd-order tensors. */
    if(nmodes == 3) {
        sptAssert(sptOmpMTTKRPHiCOOKernels_3D_MatrixTiling_Scheduled_Reduce_Balanced(hitsr, mats, copy_mats, mats_order, mode, tk) == 0);
        return 0;
    }

    sptIndex const * const ndims = hitsr->ndims;
    sptValue const * const restrict vals = hitsr->values.data;
    sptElementIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(sptIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP HiCOO SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    sptIndex const tmpI = mats[mode]->nrows;
    sptElementIndex const R = mats[mode]->ncols;
    sptRankMatrix * const restrict M = mats[nmodes];
    sptValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(*mvals));
    /* Zero the per-thread accumulators. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    /* Superblock size along the mode and the number of kernel strips. */
    sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
    sptIndex num_kernel_dim = (ndims[mode] + sk - 1) / sk;
    sptIndexVector * restrict kschr_balanced_mode = hitsr->kschr_balanced[mode];
    sptIndexVector * restrict kschr_balanced_pos_mode = hitsr->kschr_balanced_pos[mode];
    sptIndex npars = hitsr->nkpars[mode];

#ifdef NNZ_STATISTICS
    sptNnzIndex * thread_nnzs = (sptNnzIndex*)malloc(tk * sizeof(sptNnzIndex));
    memset(thread_nnzs, 0, tk * sizeof(sptNnzIndex));
#endif

    /* Loop parallel iterations */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex p=0; p<npars; ++p) {
        int tid = omp_get_thread_num();

        /* Loop kernels */
        for(sptIndex i=0; i<num_kernel_dim; ++i) {
            /* FIX: (len - 1) underflows for an empty unsigned-length strip;
             * test p + 1 >= len instead. */
            if(kschr_balanced_pos_mode[i].len == 0 ||
               p + 1 >= kschr_balanced_pos_mode[i].len) continue;
            sptIndex j_begin = kschr_balanced_pos_mode[i].data[p];
            sptIndex j_end = kschr_balanced_pos_mode[i].data[p+1];

            for(sptIndex j=j_begin; j<j_end; ++j) {
                sptIndex kernel_num = kschr_balanced_mode[i].data[j];
                sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
                sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

                /* Allocate thread-private data */
                sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
                sptValueVector scratch; // Temporary array
                sptNewValueVector(&scratch, R, R);

                /* Loop blocks in a kernel */
                for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
                    /* Blocked matrices */
                    for(sptIndex m=0; m<nmodes; ++m)
                        blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
                    sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

                    sptNnzIndex bptr_begin = hitsr->bptr.data[b];
                    sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
                    thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

                    /* Loop entries in a block.
                     * FIX: counter widened to sptNnzIndex to match bptr. */
                    for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                        /* Multiply the 1st matrix */
                        sptIndex times_mat_index = mats_order[1];
                        sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                        sptValue const entry = vals[z];
                        #pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                        }
                        /* Multiply the rest matrices */
                        for(sptIndex m=2; m<nmodes; ++m) {
                            times_mat_index = mats_order[m];
                            tmp_i = hitsr->einds[times_mat_index].data[z];
                            #pragma omp simd
                            for(sptElementIndex r=0; r<R; ++r) {
                                scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                            }
                        }

                        /* Accumulate into this thread's private output copy. */
                        sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                        #pragma omp simd
                        for(sptElementIndex r=0; r<R; ++r) {
                            blocked_mvals[(sptBlockMatrixIndex)mode_i * stride + r] += scratch.data[r];
                        }
                    }   // End loop entries
                }   // End loop blocks

                /* Free thread-private space */
                free(blocked_times_mat);
                sptFreeValueVector(&scratch);
            }   // End kernels in a partition
        }   // End loop kernels
    }   // End loop iterations

    /* Process the leftover kernels (kschr_rest); different threads may touch
     * the same output rows here, hence the atomic updates. */
#ifdef NNZ_STATISTICS
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk) shared(thread_nnzs)
#else
    #pragma omp parallel for schedule(dynamic, CHUNKSIZE) num_threads(tk)
#endif
    for(sptIndex k = 0; k < hitsr->kschr_rest[mode].len; ++k) {
        int tid = omp_get_thread_num();
        sptIndex kernel_num = hitsr->kschr_rest[mode].data[k];
        sptNnzIndex kptr_begin = hitsr->kptr.data[kernel_num];
        sptNnzIndex kptr_end = hitsr->kptr.data[kernel_num+1];

        /* Allocate thread-private data */
        sptValue ** blocked_times_mat = (sptValue**)malloc(nmodes * sizeof(*blocked_times_mat));
        sptValueVector scratch; // Temporary array
        sptNewValueVector(&scratch, R, R);

        /* Loop blocks in a kernel */
        for(sptNnzIndex b=kptr_begin; b<kptr_end; ++b) {
            /* Blocked matrices */
            for(sptIndex m=0; m<nmodes; ++m)
                blocked_times_mat[m] = mats[m]->values + (hitsr->binds[m].data[b] << hitsr->sb_bits) * stride;
            /* Use copy_mats to reduce atomics */
            sptValue * blocked_mvals = copy_mats[tid]->values + (hitsr->binds[mode].data[b] << hitsr->sb_bits) * stride;

            sptNnzIndex bptr_begin = hitsr->bptr.data[b];
            sptNnzIndex bptr_end = hitsr->bptr.data[b+1];
#ifdef NNZ_STATISTICS
            thread_nnzs[tid] += (bptr_end - bptr_begin);
#endif

            /* Loop entries in a block (FIX: sptNnzIndex counter, as above). */
            for(sptNnzIndex z=bptr_begin; z<bptr_end; ++z) {
                /* Multiply the 1st matrix */
                sptIndex times_mat_index = mats_order[1];
                sptElementIndex tmp_i = hitsr->einds[times_mat_index].data[z];
                sptValue const entry = vals[z];
                #pragma omp simd
                for(sptElementIndex r=0; r<R; ++r) {
                    scratch.data[r] = entry * blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                }
                /* Multiply the rest matrices */
                for(sptIndex m=2; m<nmodes; ++m) {
                    times_mat_index = mats_order[m];
                    tmp_i = hitsr->einds[times_mat_index].data[z];
                    #pragma omp simd
                    for(sptElementIndex r=0; r<R; ++r) {
                        scratch.data[r] *= blocked_times_mat[times_mat_index][(sptBlockMatrixIndex)tmp_i * stride + r];
                    }
                }

                sptElementIndex const mode_i = hitsr->einds[mode].data[z];
                sptValue * const restrict bmvals_row = blocked_mvals + mode_i * stride;
                for(sptElementIndex r=0; r<R; ++r) {
                    #pragma omp atomic update
                    bmvals_row[r] += scratch.data[r];
                }
            }   // End loop entries
        }   // End loop blocks

        /* Free thread-private space */
        free(blocked_times_mat);
        sptFreeValueVector(&scratch);
    }   // End loop kernels

    /* Reduction: sum the per-thread copies into the real output. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(sptIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
            #pragma omp simd
            for(sptElementIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

#ifdef NNZ_STATISTICS
    /* Calculate load balance of kernels */
    sptNnzIndex sum_nnzs = 0, min_nnzs = hitsr->nnz, max_nnzs = 0;
    double std_nnzs = 0.0;
    double avg_nnzs = hitsr->nnz / (double)tk;
    for(int i = 0; i < tk; ++i) {
        sum_nnzs += thread_nnzs[i];
        if(min_nnzs > thread_nnzs[i])
            min_nnzs = thread_nnzs[i];
        if(max_nnzs < thread_nnzs[i])
            max_nnzs = thread_nnzs[i];
        std_nnzs += (thread_nnzs[i] - avg_nnzs) * (thread_nnzs[i] - avg_nnzs);
    }
    std_nnzs = sqrt(std_nnzs / tk);
    printf("min_nnzs: %"PARTI_PRI_NNZ_INDEX ", max_nnzs: %"PARTI_PRI_NNZ_INDEX ", avg_nnzs: %.1lf, std_nnzs: %.1lf\n", min_nnzs, max_nnzs, avg_nnzs, std_nnzs);
    sptAssert(sum_nnzs == hitsr->nnz);
    free(thread_nnzs);
#endif

    return 0;
}
|
singlenode_intersectreduce.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_INTERSECTREDUCE_H_
#define SRC_SINGLENODE_INTERSECTREDUCE_H_
#include <algorithm>
#include "src/bitvector.h"
#ifdef INTERSECT_MKL
#error
/*
 * Row-wise sparse intersection C = A ∩ B with op_fp applied to each pair of
 * matching entries, using a sorted-merge walk of each row.  The CSR index
 * arrays (ia/ja/ib/ic/jc) hold 1-based (MKL-style) values.
 * NOTE(review): this variant sits behind an "#error" directive above and
 * appears stale; the op_fp call originally passed a stray, undeclared third
 * argument (vsp) that does not match the declared two-argument signature —
 * fixed below.  The unused local nzmax has been dropped.
 */
template <typename Ta, typename Tb, typename Tc>
void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb,
                   int *ib, Tc **c, int **jc, int **ic, Tc (*op_fp)(Ta, Tb)) {
  // The intersection cannot have more nonzeros than the denser input.
  int nnzc = std::max(ia[m] - 1, ib[m] - 1);
  (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64));
  (*c) = reinterpret_cast<Tc *>(
      _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(Tc), 64));
  (*jc) = reinterpret_cast<int *>(
      _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(int), 64));
  int cnz_cnt = 0;
  for (int row = 0; row < m; row++) {
    (*ic)[row] = cnz_cnt + 1;
    // Sorted-merge walk over this row of A and B.
    int Astart = ia[row];
    int Aend = ia[row + 1];
    int Bstart = ib[row];
    int Bend = ib[row + 1];
    while ((Astart < Aend) || (Bstart < Bend)) {
      int Acol = (Astart != Aend) ? ja[Astart - 1] : INT_MAX;
      int Bcol = (Bstart != Bend) ? jb[Bstart - 1] : INT_MAX;
      if (Acol < Bcol) {
        Astart++;
      } else if (Bcol < Acol) {
        Bstart++;
      } else {
        // Columns match: apply the reduction operator.
        // FIX: call matches the declared two-argument op_fp signature.
        (*c)[cnz_cnt] = op_fp(a[Astart - 1], b[Bstart - 1]);
        (*jc)[cnz_cnt] = Acol;
        cnz_cnt++;
        Astart++;
        Bstart++;
      }
    }
  }
  (*ic)[m] = cnz_cnt + 1;
}
#endif
#ifdef INTERSECT_NAIVE_MERGE
/*
 * Row-wise sparse intersection C = A ∩ B with op_fp applied to each pair of
 * matching entries, using a sorted-merge walk of each row (serial variant).
 * The CSR index arrays (ia/ja/ib/ic/jc) hold 1-based values.
 * FIX: the op_fp call originally passed a stray, undeclared third argument
 * (vsp) that does not match the declared two-argument signature; the unused
 * local nzmax has also been dropped.
 */
template <typename Ta, typename Tb, typename Tc>
void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb,
                   int *ib, Tc **c, int **jc, int **ic, Tc (*op_fp)(Ta, Tb)) {
#ifndef SORTED
#error Merge kernels require sorted inputs
#endif
  // The intersection cannot have more nonzeros than the denser input.
  int nnzc = std::max(ia[m] - 1, ib[m] - 1);
  (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64));
  (*c) = reinterpret_cast<Tc *>(
      _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(Tc), 64));
  (*jc) = reinterpret_cast<int *>(
      _mm_malloc((uint64_t)nnzc * (uint64_t)sizeof(int), 64));
  int cnz_cnt = 0;
  for (int row = 0; row < m; row++) {
    (*ic)[row] = cnz_cnt + 1;
    // Sorted-merge walk over this row of A and B.
    int Astart = ia[row];
    int Aend = ia[row + 1];
    int Bstart = ib[row];
    int Bend = ib[row + 1];
    while ((Astart < Aend) || (Bstart < Bend)) {
      int Acol = (Astart != Aend) ? ja[Astart - 1] : INT_MAX;
      int Bcol = (Bstart != Bend) ? jb[Bstart - 1] : INT_MAX;
      if (Acol < Bcol) {
        Astart++;
      } else if (Bcol < Acol) {
        Bstart++;
      } else {
        // Columns match: apply the reduction operator.
        // FIX: call matches the declared two-argument op_fp signature.
        (*c)[cnz_cnt] = op_fp(a[Astart - 1], b[Bstart - 1]);
        (*jc)[cnz_cnt] = Acol;
        cnz_cnt++;
        Astart++;
        Bstart++;
      }
    }
  }
  (*ic)[m] = cnz_cnt + 1;
}
#endif
#ifdef INTERSECT_PARALLEL_MERGE
/*
 * Parallel sorted-merge intersection C = A ∩ B, with op_fp writing each
 * result entry.  Two phases: (1) each chunk of rows is intersected into
 * thread-private buffers (c_t/jc_t); (2) an exclusive prefix sum of the
 * per-chunk nonzero counts places every chunk's output into the final CSR
 * arrays.  Index values in ia/ja/ib/jb/ic/jc are 1-based.
 *
 * Fixes vs. original: arrays allocated with new[] are released with
 * delete[] (plain delete is undefined behavior); the nnzs prefix array is
 * zeroed over its full (nchunks + 1) extent; unused tid locals removed.
 */
template <typename Ta, typename Tb, typename Tc>
void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb,
                   int *ib, Tc **c, int **jc, int **ic,
                   void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) {
#ifndef SORTED
#error Merge kernels require sorted inputs
#endif
  int num_threads = omp_get_max_threads();
  (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64));
  int nchunks = num_threads;
  int chunksize = (m + nchunks - 1) / nchunks;
  int *nnzs =
      reinterpret_cast<int *>(_mm_malloc((nchunks + 1) * sizeof(int), 64));
  // FIX: zero all nchunks + 1 slots (was num_threads, leaving the sentinel
  // slot uninitialized until the prefix-sum phase).
  memset(nnzs, 0, (nchunks + 1) * sizeof(int));
  Tc **c_t = new Tc *[nchunks];
  int **jc_t = new int *[nchunks];
#pragma omp parallel num_threads(num_threads)
  {
#pragma omp for schedule(dynamic)
    for (int chunk = 0; chunk < nchunks; chunk++) {
      int start_row = chunk * chunksize;
      int end_row = (chunk + 1) * chunksize;
      if (end_row > m) end_row = m;
      // Determine number of nonzeros
      int nnzA = ia[end_row] - ia[start_row];
      int nnzB = ib[end_row] - ib[start_row];
      int nnzmax = std::max(nnzA, nnzB);
      // Allocate space for nonzeros
      c_t[chunk] = reinterpret_cast<Tc *>(
          _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(Tc), 64));
      jc_t[chunk] = reinterpret_cast<int *>(
          _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(int), 64));
      int cnz_cnt = 0;
      for (int row = start_row; row < end_row; row++) {
        // Chunk-local offset; rebased by the prefix sum in phase 2.
        (*ic)[row] = cnz_cnt + 1;
        // Sorted-merge walk over this row of A and B.
        int Astart = ia[row];
        int Aend = ia[row + 1];
        int Bstart = ib[row];
        int Bend = ib[row + 1];
        while ((Astart < Aend) || (Bstart < Bend)) {
          int Acol = (Astart != Aend) ? ja[Astart - 1] : INT_MAX;
          int Bcol = (Bstart != Bend) ? jb[Bstart - 1] : INT_MAX;
          if (Acol < Bcol) {
            Astart++;
          } else if (Bcol < Acol) {
            Bstart++;
          } else {
            op_fp(a[Astart - 1], b[Bstart - 1], &(c_t[chunk][cnz_cnt]), vsp);
            jc_t[chunk][cnz_cnt] = Acol;
            cnz_cnt++;
            Astart++;
            Bstart++;
          }
        }
      }
      nnzs[chunk] = cnz_cnt;
    }  // for each chunk
  }  // pragma omp parallel
  // Main thread: exclusive prefix sum of the chunk counts gives each
  // chunk's offset into the final arrays.
  int nnzc = 0;
  for (int chunk = 0; chunk < nchunks; chunk++) {
    int tmp = nnzs[chunk];
    nnzs[chunk] = nnzc;
    nnzc += tmp;
  }
  nnzs[nchunks] = nnzc;
  (*c) = reinterpret_cast<Tc *>(
      _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64));
  (*jc) = reinterpret_cast<int *>(
      _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64));
#pragma omp parallel num_threads(num_threads)
  {
#pragma omp for schedule(dynamic)
    for (int chunk = 0; chunk < nchunks; chunk++) {
      int start_row = chunk * chunksize;
      int end_row = (chunk + 1) * chunksize;
      if (end_row > m) end_row = m;
#pragma simd
      for (int Arow = start_row; Arow < end_row; Arow++) {
        (*ic)[Arow] += nnzs[chunk];
      }
      memcpy((*c) + nnzs[chunk], c_t[chunk],
             (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(Tc));
      memcpy((*jc) + nnzs[chunk], jc_t[chunk],
             (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(int));
      _mm_free(c_t[chunk]);
      _mm_free(jc_t[chunk]);
    }
  }  // pragma omp parallel
  (*ic)[m] = nnzs[nchunks] + 1;
  // FIX: arrays from new[] must be released with delete[].
  delete [] c_t;
  delete [] jc_t;
  _mm_free(nnzs);
}
#endif
#ifdef INTERSECT_NAIVE_SPA
// Comparator used to restore sorted column order after the SPA pass.
bool cmp_int_intersect_naive(int i1, int i2) { return i1 < i2; }

/*
 * Sparse-accumulator (SPA) intersection C = A ∩ B with op_fp applied to
 * matching entries.  Each A row is scattered into a dense accumulator
 * (Crow, with Cflags marking occupancy), then probed with the B row;
 * survivors are gathered into the output.  Index values are 1-based.
 * FIX: the op_fp call originally passed a stray, undeclared third argument
 * (vsp) that does not match the declared two-argument signature.
 */
template <typename Ta, typename Tb, typename Tc>
void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb,
                   int *ib, Tc **c, int **jc, int **ic, Tc (*op_fp)(Ta, Tb)) {
  Tc *Crow = reinterpret_cast<Tc *>(_mm_malloc(n * sizeof(Tc), 64));
  int *Cidxs = reinterpret_cast<int *>(_mm_malloc(n * sizeof(int), 64));
  bool *Cflags = reinterpret_cast<bool *>(_mm_malloc(n * sizeof(bool), 64));
  memset(Crow, 0, n * sizeof(Tc));
  memset(Cflags, 0, n * sizeof(bool));
  // The intersection cannot have more nonzeros than the denser input.
  int nnzA = ia[m] - 1;
  int nnzB = ib[m] - 1;
  int nnzmax = std::max(nnzA, nnzB);
  (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64));
  (*c) = reinterpret_cast<Tc *>(
      _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(Tc), 64));
  (*jc) = reinterpret_cast<int *>(
      _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(int), 64));
  int cint_cnt = 0;
  for (int Arow = 0; Arow < m; Arow++) {
    int c_row_int_start = cint_cnt;
    int Arow_nnz = 0;
    (*ic)[Arow] = cint_cnt + 1;
    // Scatter the A row into the accumulator.
    for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
      int Acol = ja[Anz_id - 1];
      Cidxs[Arow_nnz] = Acol - 1;
      Cflags[Acol - 1] = true;
      Crow[Acol - 1] = a[Anz_id - 1];
      Arow_nnz++;
    }
    // Probe with the B row; columns present in both go to the output.
    for (int Bnz_id = ib[Arow]; Bnz_id < ib[Arow + 1]; Bnz_id++) {
      int Bcol = jb[Bnz_id - 1];
      if (Cflags[Bcol - 1]) {
        (*jc)[cint_cnt] = Bcol;
        cint_cnt++;
        // FIX: call matches the declared two-argument op_fp signature.
        Crow[Bcol - 1] = op_fp(Crow[Bcol - 1], b[Bnz_id - 1]);
      }
    }
#ifdef SORTED
    std::sort((*jc) + c_row_int_start, (*jc) + cint_cnt,
              cmp_int_intersect_naive);
#endif
    // Reset only the flags this row touched.
    for (int Cnz_id = 0; Cnz_id < Arow_nnz; Cnz_id++) {
      Cflags[Cidxs[Cnz_id]] = 0;
    }
    // Gather the result values in (sorted) column order.
    for (int Cnz_id = c_row_int_start; Cnz_id < cint_cnt; Cnz_id++) {
      int Ccol = (*jc)[Cnz_id];
      (*c)[Cnz_id] = Crow[Ccol - 1];
    }
  }
  (*ic)[m] = cint_cnt + 1;
  _mm_free(Cidxs);
  _mm_free(Crow);
  _mm_free(Cflags);
}
#endif
#ifdef INTERSECT_PARALLEL_SPA
// Comparator used to restore sorted column order after the SPA pass.
bool cmp_int_intersect_parallel(int i1, int i2) { return i1 < i2; }

/*
 * Parallel sparse-accumulator (SPA) intersection C = A ∩ B with op_fp
 * writing each result entry.  Rows are split into one chunk per thread;
 * each thread scatters its A row into a dense accumulator (Crow/Cflags),
 * probes it with the B row, and gathers survivors into chunk-local buffers.
 * A second pass copies the chunk outputs into the final CSR arrays using an
 * exclusive prefix sum of the per-chunk counts.  Index values are 1-based.
 *
 * Fixes vs. original:
 *  - Cidxs[tid] was allocated with n * sizeof(Tc) instead of
 *    n * sizeof(int) — a heap overflow whenever sizeof(Tc) < sizeof(int);
 *  - per-thread scratch (Crow/Cidxs/Cflags) and the pointer tables were
 *    leaked; they are now released, and delete[] is used for new[] arrays.
 */
template <typename Ta, typename Tb, typename Tc>
void my_dintersect(int m, int n, Ta *a, int *ja, int *ia, Tb *b, int *jb,
                   int *ib, Tc **c, int **jc, int **ic,
                   void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) {
  int num_threads = omp_get_max_threads();
  (*ic) = reinterpret_cast<int *>(_mm_malloc((m + 1) * sizeof(int), 64));
  int nchunks = num_threads;
  int chunksize = (m + nchunks - 1) / nchunks;
  int *nnzs =
      reinterpret_cast<int *>(_mm_malloc((nchunks + 1) * sizeof(int), 64));
  // FIX: zero all nchunks + 1 slots (was num_threads).
  memset(nnzs, 0, (nchunks + 1) * sizeof(int));
  // Value-initialized () so any slot a thread never fills stays NULL and is
  // safe to skip during cleanup.
  Tc **c_t = new Tc *[nchunks]();
  int **jc_t = new int *[nchunks]();
  Tc **Crow = new Tc *[num_threads]();
  int **Cidxs = new int *[num_threads]();
  bool **Cflags = new bool *[num_threads]();
#pragma omp parallel num_threads(num_threads)
  {
    int tid = omp_get_thread_num();
    Crow[tid] = reinterpret_cast<Tc *>(_mm_malloc(n * sizeof(Tc), 64));
    // FIX: element type is int, so the size must be n * sizeof(int)
    // (was n * sizeof(Tc)).
    Cidxs[tid] = reinterpret_cast<int *>(_mm_malloc(n * sizeof(int), 64));
    Cflags[tid] = reinterpret_cast<bool *>(_mm_malloc(n * sizeof(bool), 64));
    memset(Crow[tid], 0, n * sizeof(Tc));
    memset(Cidxs[tid], 0, n * sizeof(int));
    memset(Cflags[tid], 0, n * sizeof(bool));
#pragma omp for schedule(dynamic)
    for (int chunk = 0; chunk < nchunks; chunk++) {
      int start_row = chunk * chunksize;
      int end_row = (chunk + 1) * chunksize;
      if (end_row > m) end_row = m;
      // Determine number of nonzeros
      int nnzA = ia[end_row] - ia[start_row];
      int nnzB = ib[end_row] - ib[start_row];
      int nnzmax = std::max(nnzA, nnzB);
      // Allocate space for nonzeros
      c_t[chunk] = reinterpret_cast<Tc *>(
          _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(Tc), 64));
      jc_t[chunk] = reinterpret_cast<int *>(
          _mm_malloc((uint64_t)(nnzmax) * (uint64_t)sizeof(int), 64));
      int cint_cnt = 0;
      for (int row = start_row; row < end_row; row++) {
        // Chunk-local offset; rebased by the prefix sum in phase 2.
        (*ic)[row] = cint_cnt + 1;
        int c_row_int_start = cint_cnt;
        int Arow_nnz = 0;
        // Scatter the A row into this thread's accumulator.
        for (int Anz_id = ia[row]; Anz_id < ia[row + 1]; Anz_id++) {
          int Acol = ja[Anz_id - 1];
          Cidxs[tid][Arow_nnz] = Acol - 1;
          Cflags[tid][Acol - 1] = true;
          Crow[tid][Acol - 1] = a[Anz_id - 1];
          Arow_nnz++;
        }
        // Probe with the B row; columns present in both go to the output.
        for (int Bnz_id = ib[row]; Bnz_id < ib[row + 1]; Bnz_id++) {
          int Bcol = jb[Bnz_id - 1];
          if (Cflags[tid][Bcol - 1]) {
            jc_t[chunk][cint_cnt] = Bcol;
            cint_cnt++;
            op_fp(Crow[tid][Bcol - 1], b[Bnz_id - 1], &(Crow[tid][Bcol-1]), vsp);
          }
        }
#ifdef SORTED
        std::sort(jc_t[chunk] + c_row_int_start, jc_t[chunk] + cint_cnt,
                  cmp_int_intersect_parallel);
#endif
        // Reset only the flags this row touched.
        for (int Cnz_id = 0; Cnz_id < Arow_nnz; Cnz_id++) {
          Cflags[tid][Cidxs[tid][Cnz_id]] = 0;
        }
        // Gather the result values in (sorted) column order.
        for (int Cnz_id = c_row_int_start; Cnz_id < cint_cnt; Cnz_id++) {
          int Ccol = jc_t[chunk][Cnz_id];
          c_t[chunk][Cnz_id] = Crow[tid][Ccol - 1];
        }
      }
      nnzs[chunk] = cint_cnt;
    }  // for each chunk
  }  // pragma omp parallel
  // Main thread: exclusive prefix sum of the chunk counts gives each
  // chunk's offset into the final arrays.
  int nnzc = 0;
  for (int chunk = 0; chunk < nchunks; chunk++) {
    int tmp = nnzs[chunk];
    nnzs[chunk] = nnzc;
    nnzc += tmp;
  }
  nnzs[nchunks] = nnzc;
  (*c) = reinterpret_cast<Tc *>(
      _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64));
  (*jc) = reinterpret_cast<int *>(
      _mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64));
#pragma omp parallel num_threads(num_threads)
  {
#pragma omp for schedule(dynamic)
    for (int chunk = 0; chunk < nchunks; chunk++) {
      int start_row = chunk * chunksize;
      int end_row = (chunk + 1) * chunksize;
      if (end_row > m) end_row = m;
#pragma simd
      for (int Arow = start_row; Arow < end_row; Arow++) {
        (*ic)[Arow] += nnzs[chunk];
      }
      memcpy((*c) + nnzs[chunk], c_t[chunk],
             (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(Tc));
      memcpy((*jc) + nnzs[chunk], jc_t[chunk],
             (nnzs[chunk + 1] - nnzs[chunk]) * sizeof(int));
      _mm_free(c_t[chunk]);
      _mm_free(jc_t[chunk]);
    }
  }  // pragma omp parallel
  (*ic)[m] = nnzs[nchunks] + 1;
  // FIX: release the per-thread scratch (previously leaked).  Guarded in
  // case fewer threads ran than requested and a slot is still NULL.
  for (int t = 0; t < num_threads; t++) {
    if (Crow[t]) _mm_free(Crow[t]);
    if (Cidxs[t]) _mm_free(Cidxs[t]);
    if (Cflags[t]) _mm_free(Cflags[t]);
  }
  // FIX: arrays from new[] must be released with delete[].
  delete [] Crow;
  delete [] Cidxs;
  delete [] Cflags;
  delete [] c_t;
  delete [] jc_t;
  _mm_free(nnzs);
}
#endif
/*
 * Dense bitvector intersection: bv3 = bv1 & bv2, with op_fp applied to the
 * (v1, v2) value pairs at every surviving bit position, writing into v3.
 * num_ints is the number of 32-bit words in each bitvector; *nnz receives
 * the population count of the result.
 * FIX: dropped an unused local copy of v1[i] inside the inner loop.
 */
template <typename Ta, typename Tb, typename Tc>
void intersect_dense_segment(Ta* v1, int * bv1, int * nnz, int num_ints, Tb * v2, int * bv2, Tc * v3, int * bv3,
                             void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) {
#pragma omp parallel for
  for (int i = 0; i < num_ints; i++)
  {
    bv3[i] = bv1[i] & bv2[i];
  }
  int tmp_nnz = 0;
#pragma omp parallel for reduction(+:tmp_nnz)
  for (int ii = 0; ii < num_ints; ii++)
  {
    int cnt = _popcnt32(bv3[ii]);
    // Skip whole 32-bit words with no surviving bits.
    if (cnt == 0) continue;
    tmp_nnz += cnt;
    for (int i = ii * 32; i < (ii + 1) * 32; i++)
    {
      if (get_bitvector(i, bv3))
      {
        op_fp(v1[i], v2[i], &(v3[i]), vsp);
      }
    }
  }
  *nnz = tmp_nnz;
}
#endif // SRC_SINGLENODE_INTERSECTREDUCE_H_
|
doacross-1.c | extern void abort (void);
#define N 256
/* Shared arrays written/checked by the doacross loops below. */
int a[N], b[N / 16][8][4], c[N / 32][8][8];
/* volatile so the compiler cannot fold the d/e-dependent loop bounds;
   both stay 0, making several of the ordered loops intentionally empty. */
volatile int d, e;
/* Compliance test for OpenMP doacross loops:
   "#pragma omp ordered depend(sink: ...)" / "depend(source)".
   Each loop writes 1, waits on its sink dependences, verifies the awaited
   iterations have reached at least stage 2, then publishes stage 3. */
int
main ()
{
int i, j, k, l, m;
#pragma omp parallel private (l)
{
/* 1-D doacross: iteration i waits on iteration i - 1. */
#pragma omp for schedule(static, 1) ordered (1) nowait
for (i = 0; i < N; i++)
{
#pragma omp atomic write
a[i] = 1;
#pragma omp ordered depend(sink: i - 1)
if (i)
{
#pragma omp atomic read
l = a[i - 1];
if (l < 2)
abort ();
}
#pragma omp atomic write
a[i] = 2;
if (i < N - 1)
{
#pragma omp atomic read
l = a[i + 1];
/* The successor must not have passed its own source point yet. */
if (l == 3)
abort ();
}
#pragma omp ordered depend(source)
#pragma omp atomic write
a[i] = 3;
}
/* 3-D doacross with multiple sink vectors on one ordered construct. */
#pragma omp for schedule(static) ordered (3) nowait
for (i = 2; i < N / 16 - 1; i++)
for (j = 0; j < 8; j += 2)
for (k = 1; k <= 3; k++)
{
#pragma omp atomic write
b[i][j][k] = 1;
#pragma omp ordered depend(sink: i, j - 2, k - 1) \
depend(sink: i - 2, j - 2, k + 1)
#pragma omp ordered depend(sink: i - 3, j + 2, k - 2)
if (j >= 2 && k > 1)
{
#pragma omp atomic read
l = b[i][j - 2][k - 1];
if (l < 2)
abort ();
}
#pragma omp atomic write
b[i][j][k] = 2;
if (i >= 4 && j >= 2 && k < 3)
{
#pragma omp atomic read
l = b[i - 2][j - 2][k + 1];
if (l < 2)
abort ();
}
if (i >= 5 && j < N / 16 - 3 && k == 3)
{
#pragma omp atomic read
l = b[i - 3][j + 2][k - 2];
if (l < 2)
abort ();
}
#pragma omp ordered depend(source)
#pragma omp atomic write
b[i][j][k] = 3;
}
/* Macro D(m) expands A() over 64 suffixed names.  With A(n) = "int n;"
   it declares dummy variables; redefined below it generates 61-level
   loop nests and dependence-vector tails for the ordered(61) test. */
#define A(n) int n;
#define B(n) A(n##0) A(n##1) A(n##2) A(n##3)
#define C(n) B(n##0) B(n##1) B(n##2) B(n##3)
#define D(n) C(n##0) C(n##1) C(n##2) C(n##3)
D(m)
#undef A
/* 61-dimensional doacross: 3 real loops plus 58 generated
   single-iteration loops (n = 4; n < 5). */
#pragma omp for collapse (2) ordered(61) schedule(dynamic, 15)
for (i = 0; i < N / 32; i++)
for (j = 7; j > 1; j--)
for (k = 6; k >= 0; k -= 2)
#define A(n) for (n = 4; n < 5; n++)
D(m)
#undef A
{
#pragma omp atomic write
c[i][j][k] = 1;
/* E(m) appends the 58 trailing (constant) sink coordinates. */
#define A(n) ,n
#define E(n) C(n##0) C(n##1) C(n##2) B(n##30) B(n##31) A(n##320) A(n##321)
#pragma omp ordered depend (sink: i, j, k + 2 E(m)) \
depend (sink:i - 2, j + 1, k - 4 E(m)) \
depend(sink: i - 1, j - 2, k - 2 E(m))
if (k <= 4)
{
#pragma omp atomic read
l = c[i][j][k + 2];
if (l < 2)
abort ();
}
#pragma omp atomic write
c[i][j][k] = 2;
if (i >= 2 && j < 7 && k >= 4)
{
#pragma omp atomic read
l = c[i - 2][j + 1][k - 4];
if (l < 2)
abort ();
}
if (i >= 1 && j >= 4 && k >= 2)
{
#pragma omp atomic read
l = c[i - 1][j - 2][k - 2];
if (l < 2)
abort ();
}
#pragma omp ordered depend (source)
#pragma omp atomic write
c[i][j][k] = 3;
}
/* d == 0, so the k loop (k < d) is empty: the body never runs and
   lastprivate must still deliver the post-loop i/j/k values. */
#pragma omp for collapse(2) ordered(4) lastprivate (i, j, k)
for (i = 0; i < d + 1; i++)
for (j = d + 1; j >= 0; j--)
for (k = 0; k < d; k++)
for (l = 0; l < d + 2; l++)
{
#pragma omp ordered depend (source)
#pragma omp ordered depend (sink:i - 2, j + 2, k - 2, l)
if (!e)
abort ();
}
#pragma omp single
{
if (i != 1 || j != -1 || k != 0)
abort ();
i = 8; j = 9; k = 10;
}
/* Same shape, but the innermost m loop is the empty one (m < d). */
#pragma omp for collapse(2) ordered(4) lastprivate (i, j, k, m)
for (i = 0; i < d + 1; i++)
for (j = d + 1; j >= 0; j--)
for (k = 0; k < d + 2; k++)
for (m = 0; m < d; m++)
{
#pragma omp ordered depend (source)
#pragma omp ordered depend (sink:i - 2, j + 2, k - 2, m)
abort ();
}
#pragma omp single
if (i != 1 || j != -1 || k != 2 || m != 0)
abort ();
/* Empty j loop (j = d = 0; j > 0), no lastprivate this time. */
#pragma omp for collapse(2) ordered(4) nowait
for (i = 0; i < d + 1; i++)
for (j = d; j > 0; j--)
for (k = 0; k < d + 2; k++)
for (l = 0; l < d + 4; l++)
{
#pragma omp ordered depend (source)
#pragma omp ordered depend (sink:i - 2, j + 2, k - 2, l)
if (!e)
abort ();
}
/* Final verification: every element the doacross loops touched must have
   reached stage 3, and every untouched element must still be 0. */
#pragma omp for nowait
for (i = 0; i < N; i++)
if (a[i] != 3)
abort ();
#pragma omp for collapse(2) private(k) nowait
for (i = 0; i < N / 16; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 4; k++)
if (b[i][j][k] != 3 * (i >= 2 && i < N / 16 - 1 && (j & 1) == 0 && k >= 1))
abort ();
#pragma omp for collapse(3) nowait
for (i = 0; i < N / 32; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 8; k++)
if (c[i][j][k] != 3 * (j >= 2 && (k & 1) == 0))
abort ();
}
return 0;
}
|
GB_unop__identity_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int8_fp64
// op(A') function: GB_unop_tran__identity_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int8_fp64
(
    int8_t *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply the identity operator with a double -> int8_t typecast to every
    // entry: Cx [p] = (int8_t) Ax [p], via the GraphBLAS cast function.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // typecast and store; the identity op adds nothing beyond the cast
        Cx [p] = GB_cast_to_int8_t ((double) (Ax [p])) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A into C while applying the identity operator with a
// double -> int8_t typecast.  The actual implementation is generated by
// the template GB_unop_transpose.c, specialized through the GB_* macros
// defined earlier in this file.
GrB_Info GB_unop_tran__identity_int8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out via the GxB_NO_* flags;
// the caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__lnot_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_fp32
// op(A') function: GB_tran__lnot_fp32_fp32
// C type: float
// A type: float
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_fp32_fp32
(
    float *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = lnot (Ax [p]) for all anz entries: the result is 1 when the
    // entry is zero and 0 otherwise (same type float for input and output).
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expanded form of GB_CAST_OP (p, p):
        // GETA, CASTING (float -> float), then the logical-not operator
        float x = (float) Ax [p] ;
        Cx [p] = !(x != 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A into C while applying the logical-not operator (float in,
// float out).  The implementation is generated by the template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined
// earlier in this file.
GrB_Info GB_tran__lnot_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was compiled out via the GxB_NO_* flags;
// the caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
merge.c | /**********************************************************/
/**
* 234Compositor - Image data merging library
*
* Copyright (c) 2013-2015 Advanced Institute for Computational Science, RIKEN.
* All rights reserved.
*
**/
/**********************************************************/
// @file merge.c
// @brief Image data merging routines for 234Compositor
// @author Jorji Nonaka (jorji@riken.jp)
#ifndef COMPOSITOR234_H_INCLUDE
#include "234compositor.h"
#define COMPOSITOR234_H_INCLUDE
#endif
#ifndef COMPOSITOR234_MISC_H_INCLUDE
#include "misc.h"
#define COMPOSITOR234_MISC_H_INCLUDE
#endif
#include "merge.h"
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBA32 Pixels)
* Back-to-Front Order
*
* Consider Pre-multiplied (Weighted) images
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgba32 \
( BYTE* restrict over_image, \
BYTE* restrict under_image, \
BYTE* restrict blend_image, \
unsigned int image_size )
#else
int composite_alpha_rgba32 \
( BYTE* over_image, \
BYTE* under_image, \
BYTE* blend_image, \
unsigned int image_size )
#endif
{
unsigned int i;
unsigned int full_image_size;
BYTE* blend_image_ptr;
// Per-channel scratch for the "under" (background) pixel
BYTE under_r;
BYTE under_g;
BYTE under_b;
BYTE under_a;
// Per-channel scratch for the "over" (foreground) pixel
BYTE over_r;
BYTE over_g;
BYTE over_b;
BYTE over_a;
// Per-channel composited result
BYTE blend_r;
BYTE blend_g;
BYTE blend_b;
BYTE blend_a;
float one_minus_alpha;
blend_image_ptr = (BYTE *)blend_image;
//=====================================
// Shared Memory Parallelism
//=====================================
// Total BYTE elements to walk; the loop advances one RGBA pixel per
// iteration.  NOTE(review): assumes RGBA32 and RGBA denote the same
// per-pixel element count (4) — confirm against 234compositor.h.
full_image_size = image_size * RGBA32;
#if defined ( _OPENMP )
#pragma omp parallel for \
private( i, one_minus_alpha, \
over_r, over_g, over_b, over_a, \
under_r, under_g, under_b, under_a, \
blend_r, blend_g, blend_b, blend_a )
#endif
for ( i = 0; i < full_image_size; i += RGBA ) // SKIP 4 elements
{
over_a = (BYTE)over_image[ i + 3 ];
under_a = (BYTE)under_image[ i + 3 ];
// Fast path: fully transparent foreground -> background pixel wins
if ( over_a == 0 ) {
blend_a = under_a;
blend_r = (BYTE)under_image[ i ];
blend_g = (BYTE)under_image[ i + 1 ];
blend_b = (BYTE)under_image[ i + 2 ];
}
// Fast path: fully opaque foreground -> foreground pixel wins
else if ( over_a == 255 ) {
blend_a = over_a;
blend_r = (BYTE)over_image[ i ];
blend_g = (BYTE)over_image[ i + 1 ];
blend_b = (BYTE)over_image[ i + 2 ];
}
// General case: premultiplied "over" blend, out = src + dst * (1 - src_a)
else {
// Separate R, G, B and A values of both
// the foreground and background colors
over_r = (BYTE)over_image[ i ];
over_g = (BYTE)over_image[ i + 1 ];
over_b = (BYTE)over_image[ i + 2 ];
under_r = (BYTE)under_image[ i ];
under_g = (BYTE)under_image[ i + 1 ];
under_b = (BYTE)under_image[ i + 2 ];
// Pre-calculate 1 - Src_A
one_minus_alpha = (float)( 1.0f - ( over_a / 255.0f ));
// =======================================================
// saturate_add (misc.h) presumably clamps the byte sum at 255 — confirm
blend_a = saturate_add( over_a, (BYTE)( under_a * one_minus_alpha ));
blend_r = saturate_add( over_r, (BYTE)( under_r * one_minus_alpha ));
blend_g = saturate_add( over_g, (BYTE)( under_g * one_minus_alpha ));
blend_b = saturate_add( over_b, (BYTE)( under_b * one_minus_alpha ));
// =======================================================
}
blend_image_ptr[ i ] = (BYTE)( blend_r );
blend_image_ptr[ i + 1 ] = (BYTE)( blend_g );
blend_image_ptr[ i + 2 ] = (BYTE)( blend_b );
blend_image_ptr[ i + 3 ] = (BYTE)( blend_a );
}
return EXIT_SUCCESS;
}
/*========================================================*/
/**
 * @brief Z-depth compositing (RGBAZ64 Pixels)
 *        The pixel with the smaller Z value is copied to
 *        the output; no alpha blending is performed for
 *        this pixel format.
 *
 * @param over_image  [in]  Input image
 * @param under_image [in]  Input image
 * @param blend_image [out] Composited output image
 * @param image_size  [in]  Image size (number of pixels)
 */
/*========================================================*/
#ifdef C99
int composite_alpha_rgbaz64 \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgbaz64 \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    BYTE*  over_image_ptr;
    BYTE*  under_image_ptr;
    BYTE*  blend_image_ptr;

    // Per-pixel cursors into the float Z field (assigned inside the loop)
    float* over_image_f_ptr;
    float* under_image_f_ptr;
    float* blend_image_f_ptr;

    BYTE  over_r,  over_g,  over_b,  over_a;
    BYTE  under_r, under_g, under_b, under_a;
    float over_z_f;
    float under_z_f;

    BYTE  blend_r, blend_g, blend_b, blend_a;
    float blend_z;

    blend_image_ptr = (BYTE *)blend_image;
    over_image_ptr  = (BYTE *)over_image;
    under_image_ptr = (BYTE *)under_image;

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
    // FIX: this assignment was duplicated in the original code.
    full_image_size = image_size * RGBAZ64; // 8 BYTES per pixel

#if defined ( _OPENMP )
    // FIX: the three float* cursors are assigned inside the parallel loop,
    // so they must be thread-private; leaving them shared was a data race.
    #pragma omp parallel for \
    private( i, \
             over_image_f_ptr, under_image_f_ptr, blend_image_f_ptr, \
             over_r, over_g, over_b, over_a, over_z_f, \
             under_r, under_g, under_b, under_a, under_z_f, \
             blend_r, blend_g, blend_b, blend_a, blend_z )
#endif
    for ( i = 0; i < full_image_size; i += RGBAZ64 ) // SKIP 8 BYTES
    {
        // Unpack R, G, B, A (bytes) and Z (float at byte offset 4)
        over_r = (BYTE)over_image_ptr[ i     ];
        over_g = (BYTE)over_image_ptr[ i + 1 ];
        over_b = (BYTE)over_image_ptr[ i + 2 ];
        over_a = (BYTE)over_image_ptr[ i + 3 ];
        over_image_f_ptr = (float *)&over_image_ptr[ i + 4 ];
        over_z_f = (float)over_image_f_ptr[ 0 ];

        under_r = (BYTE)under_image_ptr[ i     ];
        under_g = (BYTE)under_image_ptr[ i + 1 ];
        under_b = (BYTE)under_image_ptr[ i + 2 ];
        under_a = (BYTE)under_image_ptr[ i + 3 ];
        under_image_f_ptr = (float *)&under_image_ptr[ i + 4 ];
        under_z_f = (float)under_image_f_ptr[ 0 ];

        // Keep the pixel with the smaller Z value
        if ( over_z_f > under_z_f )
        {
            blend_r = under_r;
            blend_g = under_g;
            blend_b = under_b;
            blend_a = under_a;
            blend_z = under_z_f;
        }
        else
        {
            blend_r = over_r;
            blend_g = over_g;
            blend_b = over_b;
            blend_a = over_a;
            blend_z = over_z_f;
        }

        blend_image_ptr[ i     ] = (BYTE)( blend_r );
        blend_image_ptr[ i + 1 ] = (BYTE)( blend_g );
        blend_image_ptr[ i + 2 ] = (BYTE)( blend_b );
        blend_image_ptr[ i + 3 ] = (BYTE)( blend_a );
        blend_image_f_ptr = (float *)&blend_image_ptr[ i + 4 ];
        blend_image_f_ptr[ 0 ] = (float)blend_z;
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBA32 Pixels)
* Back-to-Front Order
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgba32f \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgba32f \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    BYTE* blend_image_ptr;

    BYTE  under_r, under_g, under_b, under_a;
    BYTE  over_r,  over_g,  over_b,  over_a;

    // Inputs rescaled to the 0.0 - 1.0 range for float arithmetic
    float over_r_f,  over_g_f,  over_b_f,  over_a_f;
    float under_r_f, under_g_f, under_b_f, under_a_f;

    float blend_r, blend_g, blend_b, blend_a;
    float one_minus_alpha;

    blend_image_ptr = (BYTE *)blend_image;

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
    full_image_size = image_size * RGBA32;

#if defined ( _OPENMP )
    // FIX: the *_f temporaries are written on every iteration and must be
    // thread-private; they were missing from the private clause (data race).
    #pragma omp parallel for \
    private( i, one_minus_alpha, \
             over_r,  over_g,  over_b,  over_a, \
             under_r, under_g, under_b, under_a, \
             over_r_f,  over_g_f,  over_b_f,  over_a_f, \
             under_r_f, under_g_f, under_b_f, under_a_f, \
             blend_r, blend_g, blend_b, blend_a )
#endif
    for ( i = 0; i < full_image_size; i += RGBA ) // SKIP 4 elements
    {
        // Separate R, G, B and A values of both
        // the foreground and background colors
        over_r = (BYTE)over_image[ i     ];
        over_g = (BYTE)over_image[ i + 1 ];
        over_b = (BYTE)over_image[ i + 2 ];
        over_a = (BYTE)over_image[ i + 3 ];

        under_r = (BYTE)under_image[ i     ];
        under_g = (BYTE)under_image[ i + 1 ];
        under_b = (BYTE)under_image[ i + 2 ];
        under_a = (BYTE)under_image[ i + 3 ];

        // Convert BYTE to Float (Range: 0.0 - 1.0)
        over_r_f  = (float)( over_r  / 255.0f );
        over_g_f  = (float)( over_g  / 255.0f );
        over_b_f  = (float)( over_b  / 255.0f );
        over_a_f  = (float)( over_a  / 255.0f );

        under_r_f = (float)( under_r / 255.0f );
        under_g_f = (float)( under_g / 255.0f );
        under_b_f = (float)( under_b / 255.0f );
        under_a_f = (float)( under_a / 255.0f );

        // Pre-calculate 1 - Src_A
        one_minus_alpha = (float)( 1.0f - over_a_f );

        // Back-to-front "over" on premultiplied colors: C = Cs + Cd * (1 - As)
        blend_a = (float)( over_a_f + ( under_a_f * one_minus_alpha ));
        blend_r = (float)( over_r_f + ( under_r_f * one_minus_alpha ));
        blend_g = (float)( over_g_f + ( under_g_f * one_minus_alpha ));
        blend_b = (float)( over_b_f + ( under_b_f * one_minus_alpha ));

        // Clamp component values to [0, 1] before converting back to BYTE
        blend_r = clamp_float( blend_r, 0.0f, 1.0f );
        blend_g = clamp_float( blend_g, 0.0f, 1.0f );
        blend_b = clamp_float( blend_b, 0.0f, 1.0f );
        blend_a = clamp_float( blend_a, 0.0f, 1.0f );

        blend_image_ptr[ i     ] = (BYTE)( blend_r * 255 );
        blend_image_ptr[ i + 1 ] = (BYTE)( blend_g * 255 );
        blend_image_ptr[ i + 2 ] = (BYTE)( blend_b * 255 );
        blend_image_ptr[ i + 3 ] = (BYTE)( blend_a * 255 );
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBAZ64 Pixels)
* Back-to-Front Order
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgbaz64f \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgbaz64f \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    BYTE*  over_image_ptr;
    BYTE*  under_image_ptr;
    BYTE*  blend_image_ptr;

    // Per-pixel cursors into the float Z field (assigned inside the loop)
    float* over_image_f_ptr;
    float* under_image_f_ptr;
    float* blend_image_f_ptr;

    BYTE  over_r,  over_g,  over_b,  over_a;
    BYTE  under_r, under_g, under_b, under_a;

    float over_r_f,  over_g_f,  over_b_f,  over_a_f,  over_z_f;
    float under_r_f, under_g_f, under_b_f, under_a_f, under_z_f;

    float blend_r, blend_g, blend_b, blend_a, blend_z;
    float one_minus_alpha;

    blend_image_ptr = (BYTE *)blend_image;
    over_image_ptr  = (BYTE *)over_image;
    under_image_ptr = (BYTE *)under_image;

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
    // FIX: this assignment was duplicated in the original code.
    full_image_size = image_size * RGBAZ64; // 8 BYTES per pixel

#if defined ( _OPENMP )
    // FIX: the float* cursors and the *_f temporaries are written inside
    // the loop; they were missing from the private clause (data race).
    #pragma omp parallel for \
    private( i, one_minus_alpha, \
             over_image_f_ptr, under_image_f_ptr, blend_image_f_ptr, \
             over_r,  over_g,  over_b,  over_a, \
             under_r, under_g, under_b, under_a, \
             over_r_f,  over_g_f,  over_b_f,  over_a_f,  over_z_f, \
             under_r_f, under_g_f, under_b_f, under_a_f, under_z_f, \
             blend_r, blend_g, blend_b, blend_a, blend_z )
#endif
    for ( i = 0; i < full_image_size; i += RGBAZ64 ) // SKIP 8 BYTES
    {
        // Unpack R, G, B, A (bytes) and Z (float at byte offset 4)
        over_r = (BYTE)over_image_ptr[ i     ];
        over_g = (BYTE)over_image_ptr[ i + 1 ];
        over_b = (BYTE)over_image_ptr[ i + 2 ];
        over_a = (BYTE)over_image_ptr[ i + 3 ];
        over_image_f_ptr = (float *)&over_image_ptr[ i + 4 ];
        over_z_f = (float)over_image_f_ptr[ 0 ];

        under_r = (BYTE)under_image_ptr[ i     ];
        under_g = (BYTE)under_image_ptr[ i + 1 ];
        under_b = (BYTE)under_image_ptr[ i + 2 ];
        under_a = (BYTE)under_image_ptr[ i + 3 ];
        under_image_f_ptr = (float *)&under_image_ptr[ i + 4 ];
        under_z_f = (float)under_image_f_ptr[ 0 ];

        // Depth sorting: make "over" the pixel with the smaller Z;
        // the blend_* variables are used as swap scratch here.
        if ( over_z_f > under_z_f )
        {
            blend_r = over_r;
            blend_g = over_g;
            blend_b = over_b;
            blend_a = over_a;
            blend_z = over_z_f;

            over_r = under_r;
            over_g = under_g;
            over_b = under_b;
            over_a = under_a;
            over_z_f = under_z_f;

            under_r = blend_r;
            under_g = blend_g;
            under_b = blend_b;
            under_a = blend_a;
            under_z_f = blend_z;
        }

        // Convert BYTE to Float (Range: 0.0 - 1.0)
        over_r_f  = (float)( over_r  / 255.0f );
        over_g_f  = (float)( over_g  / 255.0f );
        over_b_f  = (float)( over_b  / 255.0f );
        over_a_f  = (float)( over_a  / 255.0f );

        under_r_f = (float)( under_r / 255.0f );
        under_g_f = (float)( under_g / 255.0f );
        under_b_f = (float)( under_b / 255.0f );
        under_a_f = (float)( under_a / 255.0f );

        // Pre-calculate 1 - Src_A
        one_minus_alpha = 1.0f - over_a_f;

        // Back-to-front "over": C = Cs + Cd * (1 - As)
        blend_a = (float)( over_a_f + ( under_a_f * one_minus_alpha ));
        blend_r = (float)( over_r_f + ( under_r_f * one_minus_alpha ));
        blend_g = (float)( over_g_f + ( under_g_f * one_minus_alpha ));
        blend_b = (float)( over_b_f + ( under_b_f * one_minus_alpha ));

        // Clamp component values to [0, 1] before converting back to BYTE
        blend_r = clamp_float( blend_r, 0.0, 1.0 );
        blend_g = clamp_float( blend_g, 0.0, 1.0 );
        blend_b = clamp_float( blend_b, 0.0, 1.0 );
        blend_a = clamp_float( blend_a, 0.0, 1.0 );

        blend_image_ptr[ i     ] = (BYTE)( blend_r * 255 );
        blend_image_ptr[ i + 1 ] = (BYTE)( blend_g * 255 );
        blend_image_ptr[ i + 2 ] = (BYTE)( blend_b * 255 );
        blend_image_ptr[ i + 3 ] = (BYTE)( blend_a * 255 );

        // Output Z is the smaller of the two input depths
        blend_image_f_ptr = (float *)&blend_image_ptr[ i + 4 ];
        blend_image_f_ptr[ 0 ] = (float)over_z_f;
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBA56 Pixels)
* Back-to-Front Order
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgba56 \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgba56 \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    BYTE*  blend_image_ptr;

    // Per-pixel cursors into the float alpha field (assigned inside the loop)
    float* over_image_f_ptr;
    float* under_image_f_ptr;
    float* blend_image_f_ptr;

    BYTE  under_r, under_g, under_b;
    float under_a;                 // alpha is stored as a float in RGBA56
    BYTE  over_r,  over_g,  over_b;
    float over_a;

    float over_r_f,  over_g_f,  over_b_f,  over_a_f;
    float under_r_f, under_g_f, under_b_f, under_a_f;

    float blend_r, blend_g, blend_b, blend_a;
    float one_minus_alpha;

    blend_image_ptr = (BYTE *)blend_image;

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
    full_image_size = image_size * RGBA56;

#if defined ( _OPENMP )
    // FIX: the float* cursors and the *_f temporaries are written inside
    // the loop; they were missing from the private clause (data race).
    #pragma omp parallel for \
    private( i, one_minus_alpha, \
             over_image_f_ptr, under_image_f_ptr, blend_image_f_ptr, \
             over_r,  over_g,  over_b,  over_a, \
             under_r, under_g, under_b, under_a, \
             over_r_f,  over_g_f,  over_b_f,  over_a_f, \
             under_r_f, under_g_f, under_b_f, under_a_f, \
             blend_r, blend_g, blend_b, blend_a )
#endif
    for ( i = 0; i < full_image_size; i += RGBA56 ) // SKIP 7 elements
    {
        // Separate R, G, B (bytes) and A (float at byte offset 3).
        // NOTE(review): with a 7-byte pixel stride, &image[i+3] is not
        // 4-byte aligned on most iterations; this works on x86 but is
        // undefined behavior on strict-alignment targets — confirm intent.
        over_r = (BYTE)over_image[ i     ];
        over_g = (BYTE)over_image[ i + 1 ];
        over_b = (BYTE)over_image[ i + 2 ];
        over_image_f_ptr = (float *)&over_image[ i + 3 ];
        over_a = (float)*over_image_f_ptr;

        under_r = (BYTE)under_image[ i     ];
        under_g = (BYTE)under_image[ i + 1 ];
        under_b = (BYTE)under_image[ i + 2 ];
        under_image_f_ptr = (float *)&under_image[ i + 3 ];
        under_a = (float)*under_image_f_ptr;

        // Convert BYTE to Float (Range: 0.0 - 1.0); alpha is already a float
        over_r_f  = (float)( over_r  / 255.0f );
        over_g_f  = (float)( over_g  / 255.0f );
        over_b_f  = (float)( over_b  / 255.0f );
        over_a_f  = (float) over_a ;

        under_r_f = (float)( under_r / 255.0f );
        under_g_f = (float)( under_g / 255.0f );
        under_b_f = (float)( under_b / 255.0f );
        under_a_f = (float) under_a ;

        // Pre-calculate 1 - Src_A
        one_minus_alpha = 1.0f - over_a_f;

        // Back-to-front "over": C = Cs + Cd * (1 - As)
        blend_a = (float)( over_a_f + ( under_a_f * one_minus_alpha ));
        blend_r = (float)( over_r_f + ( under_r_f * one_minus_alpha ));
        blend_g = (float)( over_g_f + ( under_g_f * one_minus_alpha ));
        blend_b = (float)( over_b_f + ( under_b_f * one_minus_alpha ));

        // Clamp component values to [0, 1]
        blend_r = clamp_float( blend_r, 0.0, 1.0 );
        blend_g = clamp_float( blend_g, 0.0, 1.0 );
        blend_b = clamp_float( blend_b, 0.0, 1.0 );
        blend_a = clamp_float( blend_a, 0.0, 1.0 );

        blend_image_ptr[ i     ] = (BYTE)( blend_r * 255 );
        blend_image_ptr[ i + 1 ] = (BYTE)( blend_g * 255 );
        blend_image_ptr[ i + 2 ] = (BYTE)( blend_b * 255 );
        blend_image_f_ptr = (float *)&blend_image_ptr[ i + 3 ];
        *blend_image_f_ptr = (float) blend_a;
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBA64 Pixels)
* Back-to-Front Order
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgba64 \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgba64 \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    BYTE*  blend_image_ptr;

    // Per-pixel cursors into the float alpha field (assigned inside the loop)
    float* over_image_f_ptr;
    float* under_image_f_ptr;
    float* blend_image_f_ptr;

    BYTE  under_r, under_g, under_b;
    float under_a;                 // alpha is stored as a float at byte offset 4
    BYTE  over_r,  over_g,  over_b;
    float over_a;

    float over_r_f,  over_g_f,  over_b_f,  over_a_f;
    float under_r_f, under_g_f, under_b_f, under_a_f;

    float blend_r, blend_g, blend_b, blend_a;
    float one_minus_alpha;

    blend_image_ptr = (BYTE *)blend_image;

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
    full_image_size = image_size * RGBA64;

#if defined ( _OPENMP )
    // FIX: the float* cursors and the *_f temporaries are written inside
    // the loop; they were missing from the private clause (data race).
    #pragma omp parallel for \
    private( i, one_minus_alpha, \
             over_image_f_ptr, under_image_f_ptr, blend_image_f_ptr, \
             over_r,  over_g,  over_b,  over_a, \
             under_r, under_g, under_b, under_a, \
             over_r_f,  over_g_f,  over_b_f,  over_a_f, \
             under_r_f, under_g_f, under_b_f, under_a_f, \
             blend_r, blend_g, blend_b, blend_a )
#endif
    for ( i = 0; i < full_image_size; i += RGBA64 ) // SKIP 8 elements
    {
        // Pixel layout: R, G, B bytes, one pad byte (X), then float alpha
        over_r = (BYTE)over_image[ i     ];
        over_g = (BYTE)over_image[ i + 1 ];
        over_b = (BYTE)over_image[ i + 2 ];
        over_image_f_ptr = (float *)&over_image[ i + 4 ];
        over_a = (float)*over_image_f_ptr;

        under_r = (BYTE)under_image[ i     ];
        under_g = (BYTE)under_image[ i + 1 ];
        under_b = (BYTE)under_image[ i + 2 ];
        under_image_f_ptr = (float *)&under_image[ i + 4 ];
        under_a = (float)*under_image_f_ptr;

        // Convert BYTE to Float (Range: 0.0 - 1.0); alpha is already a float
        over_r_f  = (float)( over_r  / 255.0f );
        over_g_f  = (float)( over_g  / 255.0f );
        over_b_f  = (float)( over_b  / 255.0f );
        over_a_f  = (float) over_a ;

        under_r_f = (float)( under_r / 255.0f );
        under_g_f = (float)( under_g / 255.0f );
        under_b_f = (float)( under_b / 255.0f );
        under_a_f = (float) under_a ;

        // Pre-calculate 1 - Src_A
        one_minus_alpha = 1.0f - over_a_f;

        // Back-to-front "over": C = Cs + Cd * (1 - As)
        blend_a = (float)( over_a_f + ( under_a_f * one_minus_alpha ));
        blend_r = (float)( over_r_f + ( under_r_f * one_minus_alpha ));
        blend_g = (float)( over_g_f + ( under_g_f * one_minus_alpha ));
        blend_b = (float)( over_b_f + ( under_b_f * one_minus_alpha ));

        // Clamp component values to [0, 1]
        blend_r = clamp_float( blend_r, 0.0, 1.0 );
        blend_g = clamp_float( blend_g, 0.0, 1.0 );
        blend_b = clamp_float( blend_b, 0.0, 1.0 );
        blend_a = clamp_float( blend_a, 0.0, 1.0 );

        blend_image_ptr[ i     ] = (BYTE)( blend_r * 255 );     // R
        blend_image_ptr[ i + 1 ] = (BYTE)( blend_g * 255 );     // G
        blend_image_ptr[ i + 2 ] = (BYTE)( blend_b * 255 );     // B
        blend_image_ptr[ i + 3 ] = (BYTE)0;                     // X (pad)
        blend_image_f_ptr = (float *)&blend_image_ptr[ i + 4 ]; // A
        *blend_image_f_ptr = (float) blend_a;
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBAZ88 Pixels)
* Back-to-Front Order
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgbaz88 \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgbaz88 \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    BYTE*  over_image_ptr;
    BYTE*  under_image_ptr;
    BYTE*  blend_image_ptr;

    // Per-pixel cursors into the float A/Z fields (assigned inside the loop)
    float* over_image_f_ptr;
    float* under_image_f_ptr;
    float* blend_image_f_ptr;

    BYTE  over_r,  over_g,  over_b;
    BYTE  under_r, under_g, under_b;

    float over_r_f,  over_g_f,  over_b_f,  over_a_f,  over_z_f;
    float under_r_f, under_g_f, under_b_f, under_a_f, under_z_f;

    float blend_r, blend_g, blend_b, blend_a, blend_z;
    float one_minus_alpha;

    blend_image_ptr = (BYTE *)blend_image;
    over_image_ptr  = (BYTE *)over_image;
    under_image_ptr = (BYTE *)under_image;

    full_image_size = image_size * RGBAZ88; // 11 BYTES per pixel

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
#if defined ( _OPENMP )
    // FIX: the original private clause listed under_b_f but not under_b,
    // omitted over_r/over_g/over_b, under_r_f/under_g_f and the three
    // float* cursors — all written every iteration (data race).  A stray
    // pre-loop read of the first pixel's Z fields was also removed: it was
    // dead (overwritten per-iteration) and read out of bounds when
    // image_size == 0.
    #pragma omp parallel for \
    private( i, one_minus_alpha, \
             over_image_f_ptr, under_image_f_ptr, blend_image_f_ptr, \
             over_r,  over_g,  over_b, \
             under_r, under_g, under_b, \
             over_r_f,  over_g_f,  over_b_f,  over_a_f,  over_z_f, \
             under_r_f, under_g_f, under_b_f, under_a_f, under_z_f, \
             blend_r, blend_g, blend_b, blend_a, blend_z )
#endif
    for ( i = 0; i < full_image_size; i += RGBAZ88 ) // SKIP 11 BYTES
    {
        // NOTE(review): with an 11-byte stride, &ptr[i+3] is usually not
        // 4-byte aligned; fine on x86, undefined on strict-alignment CPUs.
        over_r = (BYTE)over_image_ptr[ i     ];               // R
        over_g = (BYTE)over_image_ptr[ i + 1 ];               // G
        over_b = (BYTE)over_image_ptr[ i + 2 ];               // B
        over_image_f_ptr = (float *)&over_image_ptr[ i + 3 ];
        over_a_f = (float)over_image_f_ptr[ 0 ];              // A
        over_z_f = (float)over_image_f_ptr[ 1 ];              // Z

        under_r = (BYTE)under_image_ptr[ i     ];             // R
        under_g = (BYTE)under_image_ptr[ i + 1 ];             // G
        under_b = (BYTE)under_image_ptr[ i + 2 ];             // B
        under_image_f_ptr = (float *)&under_image_ptr[ i + 3 ];
        under_a_f = (float)under_image_f_ptr[ 0 ];            // A
        under_z_f = (float)under_image_f_ptr[ 1 ];            // Z

        // Depth sorting: make "over" the pixel with the smaller Z;
        // the blend_* variables are used as swap scratch here.
        if ( over_z_f > under_z_f )
        {
            blend_r = over_r;
            blend_g = over_g;
            blend_b = over_b;
            blend_a = over_a_f;
            blend_z = over_z_f;

            over_r = under_r;
            over_g = under_g;
            over_b = under_b;
            over_a_f = under_a_f;
            over_z_f = under_z_f;

            under_r = blend_r;
            under_g = blend_g;
            under_b = blend_b;
            under_a_f = blend_a;
            under_z_f = blend_z;
        }

        // Convert BYTE to Float (Range: 0.0 - 1.0)
        over_r_f  = (float)( over_r  / 255.0f );
        over_g_f  = (float)( over_g  / 255.0f );
        over_b_f  = (float)( over_b  / 255.0f );
        under_r_f = (float)( under_r / 255.0f );
        under_g_f = (float)( under_g / 255.0f );
        under_b_f = (float)( under_b / 255.0f );

        // Pre-calculate 1 - Src_A
        one_minus_alpha = 1.0f - over_a_f;

        // Back-to-front "over": C = Cs + Cd * (1 - As)
        blend_a = (float)( over_a_f + ( under_a_f * one_minus_alpha ));
        blend_r = (float)( over_r_f + ( under_r_f * one_minus_alpha ));
        blend_g = (float)( over_g_f + ( under_g_f * one_minus_alpha ));
        blend_b = (float)( over_b_f + ( under_b_f * one_minus_alpha ));

        // Clamp component values to [0, 1]
        blend_r = clamp_float( blend_r, 0.0, 1.0 );
        blend_g = clamp_float( blend_g, 0.0, 1.0 );
        blend_b = clamp_float( blend_b, 0.0, 1.0 );
        blend_a = clamp_float( blend_a, 0.0, 1.0 );

        blend_image_ptr[ i     ] = (BYTE)( blend_r * 255 );   // R
        blend_image_ptr[ i + 1 ] = (BYTE)( blend_g * 255 );   // G
        blend_image_ptr[ i + 2 ] = (BYTE)( blend_b * 255 );   // B
        blend_image_f_ptr = (float *)&blend_image_ptr[ i + 3 ];
        blend_image_f_ptr[ 0 ] = (float)blend_a;              // A
        blend_image_f_ptr[ 1 ] = (float)over_z_f;             // Z (smaller depth)
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
* @brief Alpha-blend compositing (RGBAZ96 Pixels)
* Back-to-Front Order
*
* @param over_image [in] Image to be alpha blended
* @param under_image [in] Image to be alpha blended
* @param blend_image [in] Image to be alpha blended
* @param image_size [in] Image size
*/
/*========================================================*/
#ifdef C99
int composite_alpha_rgbaz96 \
( BYTE* restrict over_image, \
  BYTE* restrict under_image, \
  BYTE* restrict blend_image, \
  unsigned int image_size )
#else
int composite_alpha_rgbaz96 \
( BYTE* over_image, \
  BYTE* under_image, \
  BYTE* blend_image, \
  unsigned int image_size )
#endif
{
    unsigned int i;
    unsigned int full_image_size;

    // Per-pixel cursors into the float A/Z fields (assigned inside the loop)
    float* over_image_f_ptr;
    float* under_image_f_ptr;
    float* blend_image_f_ptr;

    BYTE  over_r,  over_g,  over_b;
    BYTE  under_r, under_g, under_b;

    float over_r_f,  over_g_f,  over_b_f,  over_a_f,  over_z_f;
    float under_r_f, under_g_f, under_b_f, under_a_f, under_z_f;

    float blend_r, blend_g, blend_b, blend_a, blend_z;
    float one_minus_alpha;

    full_image_size = image_size * RGBAZ96; // 12 BYTES per pixel

    //=====================================
    //  Shared Memory Parallelism
    //=====================================
#if defined ( _OPENMP )
    // FIX: blend_z is used as swap scratch inside the loop but was missing
    // from the private clause (data race).
    #pragma omp parallel for \
    private( i, one_minus_alpha, \
             over_image_f_ptr, under_image_f_ptr, blend_image_f_ptr, \
             over_r, over_g, over_b, \
             under_r, under_g, under_b, \
             over_r_f,  over_g_f,  over_b_f,  over_a_f,  over_z_f, \
             under_r_f, under_g_f, under_b_f, under_a_f, under_z_f, \
             blend_r, blend_g, blend_b, blend_a, blend_z )
#endif
    for ( i = 0; i < full_image_size; i += RGBAZ96 ) // SKIP 12 BYTES
    {
        // Pixel layout: R, G, B bytes, one pad byte (X), float A, float Z
        over_r = (BYTE)over_image[ i     ];                 // R
        over_g = (BYTE)over_image[ i + 1 ];                 // G
        over_b = (BYTE)over_image[ i + 2 ];                 // B
        over_image_f_ptr = (float *)&over_image[ i + 4 ];
        over_a_f = (float)over_image_f_ptr[ 0 ];            // A
        over_z_f = (float)over_image_f_ptr[ 1 ];            // Z

        under_r = (BYTE)under_image[ i     ];               // R
        under_g = (BYTE)under_image[ i + 1 ];               // G
        under_b = (BYTE)under_image[ i + 2 ];               // B
        under_image_f_ptr = (float *)&under_image[ i + 4 ];
        under_a_f = (float)under_image_f_ptr[ 0 ];          // A
        under_z_f = (float)under_image_f_ptr[ 1 ];          // Z

        // Depth sorting: make "over" the pixel with the smaller Z;
        // the blend_* variables are used as swap scratch here.
        if ( over_z_f > under_z_f )
        {
            blend_r = over_r;
            blend_g = over_g;
            blend_b = over_b;
            blend_a = over_a_f;
            blend_z = over_z_f;

            over_r = under_r;
            over_g = under_g;
            over_b = under_b;
            over_a_f = under_a_f;
            over_z_f = under_z_f;

            under_r = blend_r;
            under_g = blend_g;
            under_b = blend_b;
            under_a_f = blend_a;
            under_z_f = blend_z;
        }

        // Convert BYTE to Float (Range: 0.0 - 1.0)
        over_r_f  = (float)( over_r  / 255.0f );
        over_g_f  = (float)( over_g  / 255.0f );
        over_b_f  = (float)( over_b  / 255.0f );
        under_r_f = (float)( under_r / 255.0f );
        under_g_f = (float)( under_g / 255.0f );
        under_b_f = (float)( under_b / 255.0f );

        // Pre-calculate 1 - Src_A
        one_minus_alpha = 1.0f - over_a_f;

        // Back-to-front "over": C = Cs + Cd * (1 - As)
        blend_a = (float)( over_a_f + ( under_a_f * one_minus_alpha ));
        blend_r = (float)( over_r_f + ( under_r_f * one_minus_alpha ));
        blend_g = (float)( over_g_f + ( under_g_f * one_minus_alpha ));
        blend_b = (float)( over_b_f + ( under_b_f * one_minus_alpha ));

        // Clamp component values to [0, 1]
        blend_r = clamp_float( blend_r, 0.0, 1.0 );
        blend_g = clamp_float( blend_g, 0.0, 1.0 );
        blend_b = clamp_float( blend_b, 0.0, 1.0 );
        blend_a = clamp_float( blend_a, 0.0, 1.0 );

        blend_image[ i     ] = (BYTE)( blend_r * 255 );     // R
        blend_image[ i + 1 ] = (BYTE)( blend_g * 255 );     // G
        blend_image[ i + 2 ] = (BYTE)( blend_b * 255 );     // B
        blend_image[ i + 3 ] = (BYTE)0;                     // X (pad)
        blend_image_f_ptr = (float *)&blend_image[ i + 4 ];
        blend_image_f_ptr[ 0 ] = (float)blend_a;            // A
        blend_image_f_ptr[ 1 ] = (float)over_z_f;           // Z (smaller depth)
    }

    return EXIT_SUCCESS;
}
/*========================================================*/
/**
 * @brief Alpha-blend compositing (RGBA128 Pixels)
 *        Back-to-Front Order
 *
 * @param over_image  [in]  Image composited on top
 * @param under_image [in]  Image composited underneath
 * @param blend_image [out] Resulting alpha-blended image
 * @param image_size  [in]  Image size (number of pixels)
 */
/*========================================================*/
#ifdef C99
int composite_alpha_rgba128
    ( float* restrict over_image,
      float* restrict under_image,
      float* restrict blend_image,
      unsigned int image_size )
#else
int composite_alpha_rgba128
    ( float* over_image,
      float* under_image,
      float* blend_image,
      unsigned int image_size )
#endif
{
    unsigned int idx;
    unsigned int num_elements;
    float fg_r, fg_g, fg_b, fg_a;     /* foreground (over) pixel  */
    float bg_r, bg_g, bg_b, bg_a;     /* background (under) pixel */
    float out_r, out_g, out_b, out_a; /* blended result           */
    float inv_a;                      /* 1 - foreground alpha     */

    //=====================================
    // Shared Memory Parallelism
    //=====================================
    num_elements = image_size * RGBA; // 4 elements per pixel

    #if defined ( _OPENMP )
    #pragma omp parallel for \
    private( idx, inv_a, \
    fg_r, fg_g, fg_b, fg_a, \
    bg_r, bg_g, bg_b, bg_a, \
    out_r, out_g, out_b, out_a )
    #endif
    for ( idx = 0; idx < num_elements; idx += RGBA ) // SKIP 4 floats
    {
        /* Split both pixels into their R, G, B, A components */
        fg_r = over_image[ idx ];
        fg_g = over_image[ idx + 1 ];
        fg_b = over_image[ idx + 2 ];
        fg_a = over_image[ idx + 3 ];
        bg_r = under_image[ idx ];
        bg_g = under_image[ idx + 1 ];
        bg_b = under_image[ idx + 2 ];
        bg_a = under_image[ idx + 3 ];

        /* Branch-free "over" operator: C = Cf + Cb * (1 - Af) */
        inv_a = 1.0f - fg_a;
        out_a = fg_a + ( bg_a * inv_a );
        out_r = fg_r + ( bg_r * inv_a );
        out_g = fg_g + ( bg_g * inv_a );
        out_b = fg_b + ( bg_b * inv_a );

        /* Clamp to [0, 1] and store */
        blend_image[ idx ]     = clamp_float( out_r, 0.0, 1.0 );
        blend_image[ idx + 1 ] = clamp_float( out_g, 0.0, 1.0 );
        blend_image[ idx + 2 ] = clamp_float( out_b, 0.0, 1.0 );
        blend_image[ idx + 3 ] = clamp_float( out_a, 0.0, 1.0 );
    }
    return EXIT_SUCCESS;
}
/*========================================================*/
/**
 * @brief Alpha-blend compositing (RGBAZ160 Pixels)
 *        Back-to-Front Order with per-pixel depth sorting
 *
 * @param over_image  [in]  Image composited on top
 * @param under_image [in]  Image composited underneath
 * @param blend_image [out] Resulting alpha-blended image (Z = nearer depth)
 * @param image_size  [in]  Image size (number of pixels)
 */
/*========================================================*/
#ifdef C99
int composite_alpha_rgbaz160
    ( float* restrict over_image,
      float* restrict under_image,
      float* restrict blend_image,
      unsigned int image_size )
#else
int composite_alpha_rgbaz160
    ( float* over_image,
      float* under_image,
      float* blend_image,
      unsigned int image_size )
#endif
{
    unsigned int idx;
    unsigned int num_elements;
    float fg_r, fg_g, fg_b, fg_a, fg_z;      /* front fragment           */
    float bg_r, bg_g, bg_b, bg_a, bg_z;      /* back fragment            */
    float out_r, out_g, out_b, out_a, out_z; /* result / swap scratch    */
    float inv_a;                             /* 1 - front alpha          */

    out_z = 0.0f;
    num_elements = image_size * RGBAZ; // 5 elements per pixel

    //=====================================
    // Shared Memory Parallelism
    //=====================================
    #if defined ( _OPENMP )
    #pragma omp parallel for \
    private( idx, inv_a, \
    fg_r, fg_g, fg_b, fg_a, fg_z, \
    bg_r, bg_g, bg_b, bg_a, bg_z, \
    out_r, out_g, out_b, out_a, out_z )
    #endif
    for ( idx = 0; idx < num_elements; idx += RGBAZ ) // SKIP 5 floats
    {
        /* Split both fragments into their R, G, B, A, Z components */
        fg_r = over_image[ idx ];
        fg_g = over_image[ idx + 1 ];
        fg_b = over_image[ idx + 2 ];
        fg_a = over_image[ idx + 3 ];
        fg_z = over_image[ idx + 4 ];
        bg_r = under_image[ idx ];
        bg_g = under_image[ idx + 1 ];
        bg_b = under_image[ idx + 2 ];
        bg_a = under_image[ idx + 3 ];
        bg_z = under_image[ idx + 4 ];

        /* Depth sorting if necessary: the fragment with the smaller Z
           becomes the front one (out_* used as swap scratch).          */
        if ( fg_z > bg_z )
        {
            out_r = fg_r; fg_r = bg_r; bg_r = out_r;
            out_g = fg_g; fg_g = bg_g; bg_g = out_g;
            out_b = fg_b; fg_b = bg_b; bg_b = out_b;
            out_a = fg_a; fg_a = bg_a; bg_a = out_a;
            out_z = fg_z; fg_z = bg_z; bg_z = out_z;
        }

        /* Branch-free "over" operator: C = Cf + Cb * (1 - Af) */
        inv_a = 1.0f - fg_a;
        out_a = fg_a + ( bg_a * inv_a );
        out_r = fg_r + ( bg_r * inv_a );
        out_g = fg_g + ( bg_g * inv_a );
        out_b = fg_b + ( bg_b * inv_a );

        /* Clamp to [0, 1] and store; Z keeps the nearer depth */
        blend_image[ idx ]     = clamp_float( out_r, 0.0, 1.0 );
        blend_image[ idx + 1 ] = clamp_float( out_g, 0.0, 1.0 );
        blend_image[ idx + 2 ] = clamp_float( out_b, 0.0, 1.0 );
        blend_image[ idx + 3 ] = clamp_float( out_a, 0.0, 1.0 );
        blend_image[ idx + 4 ] = fg_z;
    }
    return EXIT_SUCCESS;
}
// =================================================================
// ALPHA BLENDING IMAGE COMPOSITION
// =================================================================
// Optimization based on LUT (HP Image Compositing Library)
//
// Parallel Compositing Library
// http://sourceforge.net/projects/paracomp
// =================================================================
/* BEGINPARACOMPCOPYRIGHT
* The Parallel Compositing Library
* Copyright (c) 2007 Hewlett-Packard Development Company, L.P.
* This library is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 2.1 of the License, or (at your option)
* any later version.
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
* ENDPARACOMPCOPYRIGHT
*/
/*===========================================================================*/
/**
* @brief Generate look up tables for alpha blending operation.
*/
/*===========================================================================*/
void Create_AlphaBlend_LUT ( void )
{
    unsigned int alpha;
    unsigned int color;
    unsigned int s;
    unsigned int alpha_row;  /* alpha << 8 : row offset into LUT_Mult */
    unsigned int alpha_inv;  /* 255 - alpha                           */

    /* Product table:
       LUT_Mult[(alpha << 8) | color] = ((255 - alpha) * color + 0x80) >> 8,
       i.e. a rounded (1 - alpha) * color in 8-bit fixed point.          */
    for ( alpha = 0; alpha < 256; alpha++ )
    {
        alpha_row = alpha << 8;
        alpha_inv = 255 - alpha;
        for ( color = 0; color < 256; color++ )
        {
            LUT_Mult[ alpha_row | color ] =
                (BYTE)((( alpha_inv * color ) + 0x80 ) >> 8);
        }
    }
    /* Saturation table: clamp 9-bit sums (0..511) down to 0..255 */
    for ( s = 0; s < 512; s++ )
    {
        LUT_Sat[ s ] = (BYTE)(( s > 255 ) ? 255 : s);
    }
}
/*===========================================================================*/
/**
 * @brief LUT-accelerated alpha-blend compositing (RGBA32 pixels),
 *        back-to-front order.
 *
 * @param over_image  [in]  Image composited on top
 * @param under_image [in]  Image composited underneath
 * @param blend_image [out] Resulting alpha-blended image
 * @param image_size  [in]  Image size (number of pixels)
 */
/*===========================================================================*/
#ifdef C99
int composite_alpha_rgba32_LUT
    ( BYTE* restrict over_image,
      BYTE* restrict under_image,
      BYTE* restrict blend_image,
      unsigned int image_size )
#else
int composite_alpha_rgba32_LUT
    ( BYTE* over_image,
      BYTE* under_image,
      BYTE* blend_image,
      unsigned int image_size )
#endif
{
    unsigned int idx;
    unsigned int num_bytes;
    BYTE fg_r, fg_g, fg_b, fg_a;     /* front (over) pixel              */
    BYTE bg_r, bg_g, bg_b, bg_a;     /* back (under) pixel              */
    BYTE out_r, out_g, out_b, out_a; /* blended pixel                   */
    unsigned int lut_row;            /* (front alpha) << 8 : LUT row    */

    //=====================================
    // Shared Memory Parallelism
    //=====================================
    num_bytes = image_size * RGBA32;

    #if defined ( _OPENMP )
    #pragma omp parallel for \
    private( idx, lut_row, \
    fg_r, fg_g, fg_b, fg_a, \
    bg_r, bg_g, bg_b, bg_a, \
    out_r, out_g, out_b, out_a )
    #endif
    for ( idx = 0; idx < num_bytes; idx += RGBA ) // SKIP 4 elements
    {
        /* Split both pixels into their R, G, B, A components */
        fg_r = (BYTE)over_image[ idx ];
        fg_g = (BYTE)over_image[ idx + 1 ];
        fg_b = (BYTE)over_image[ idx + 2 ];
        fg_a = (BYTE)over_image[ idx + 3 ];
        bg_r = (BYTE)under_image[ idx ];
        bg_g = (BYTE)under_image[ idx + 1 ];
        bg_b = (BYTE)under_image[ idx + 2 ];
        bg_a = (BYTE)under_image[ idx + 3 ];

        lut_row = ((unsigned int)fg_a) << 8;

        /* Table-driven "over": out = sat( mult[(1-Af) * back] + front ) */
        out_r = (BYTE)LUT_Sat[ ((unsigned int)LUT_Mult[
                    lut_row | ((unsigned int)bg_r) ])
                + ((unsigned int)fg_r) ];
        out_g = (BYTE)LUT_Sat[ ((unsigned int)LUT_Mult[
                    lut_row | ((unsigned int)bg_g) ])
                + ((unsigned int)fg_g) ];
        out_b = (BYTE)LUT_Sat[ ((unsigned int)LUT_Mult[
                    lut_row | ((unsigned int)bg_b) ])
                + ((unsigned int)fg_b) ];
        out_a = (BYTE)LUT_Sat[ ((unsigned int)LUT_Mult[
                    lut_row | ((unsigned int)bg_a) ])
                + ((unsigned int)fg_a) ];

        blend_image[ idx ]     = out_r;
        blend_image[ idx + 1 ] = out_g;
        blend_image[ idx + 2 ] = out_b;
        blend_image[ idx + 3 ] = out_a;
    }
    return EXIT_SUCCESS;
}
|
knn.h | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cuda_utils.h"
#include <faiss/Heap.h>
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/GpuIndexFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <iostream>
namespace MLCommon {
namespace Selection {
/** Merge results from several shards into a single result set.
 * @param n number of query rows
 * @param k number of neighbors returned per row
 * @param nshard number of shards being merged
 * @param distances output distance array, size n * k
 * @param labels output index array, size n * k
 * @param all_distances row-wise stacked array of intermediary knn output distances, size nshard * n * k
 * @param all_labels row-wise stacked array of intermediary knn output indices, size nshard * n * k
 * @param translations label translations to apply, size nshard
 */
// Merge nshard row-stacked (n x k) shard results into one (n x k) result.
// Per row, a small heap of size <= nshard holds each shard's current best
// candidate; repeatedly popping the global best performs a k-way merge.
// C is a faiss comparator type (e.g. CMin/CMax) fixing the sort order.
template <class C>
void merge_tables(long n, long k, long nshard, float *distances, long *labels,
float *all_distances, long *all_labels, long *translations) {
if (k == 0) {
return;
}
size_t stride = n * k;  // row-to-row distance between shards in the stacked arrays
#pragma omp parallel
{
// Per-thread scratch: pointer[s] = cursor into shard s's candidate list,
// shard_ids = heap payload (which shard), heap_vals = heap keys (distances).
std::vector<int> buf(2 * nshard);
int *pointer = buf.data();
int *shard_ids = pointer + nshard;
std::vector<float> buf2(nshard);
float *heap_vals = buf2.data();
#pragma omp for
for (long i = 0; i < n; i++) {
// the heap maps values to the shard where they are
// produced.
const float *D_in = all_distances + i * k;
const long *I_in = all_labels + i * k;
int heap_size = 0;
// Seed the heap with each shard's first candidate (label < 0 marks
// an empty/invalid slot and is skipped).
for (long s = 0; s < nshard; s++) {
pointer[s] = 0;
if (I_in[stride * s] >= 0)
faiss::heap_push<C>(++heap_size, heap_vals, shard_ids,
D_in[stride * s], s);
}
float *D = distances + i * k;
long *I = labels + i * k;
// Emit the k globally-best candidates in order.
for (int j = 0; j < k; j++) {
if (heap_size == 0) {
// Fewer than k valid candidates overall: pad with sentinels.
I[j] = -1;
D[j] = C::neutral();
} else {
// pop best element
int s = shard_ids[0];
int &p = pointer[s];
D[j] = heap_vals[0];
// Translate the shard-local label into the global id space.
I[j] = I_in[stride * s + p] + translations[s];
faiss::heap_pop<C>(heap_size--, heap_vals, shard_ids);
p++;
// Refill from the same shard, if it has more valid candidates.
if (p < k && I_in[stride * s + p] >= 0)
faiss::heap_push<C>(++heap_size, heap_vals, shard_ids,
D_in[stride * s + p], s);
}
}
}
}
};
/**
 * Search for the k-nearest neighbors of a set of query vectors across
 * several device-resident data shards, then merge the per-shard results
 * on the host.
 *
 * @param input device memory to search as an array of device pointers
 * @param sizes array of row counts, one per entry of `input`
 * @param n_params size of input and sizes arrays
 * @param D number of cols in input and search_items
 * @param search_items set of vectors to query for neighbors (device memory)
 * @param n number of items in search_items
 * @param res_I pointer to device memory for returning k nearest indices
 * @param res_D pointer to device memory for returning k nearest distances
 * @param k number of neighbors to query
 * @param s the cuda stream to use
 */
template <typename IntType = int>
void brute_force_knn(float **input, int *sizes, int n_params, IntType D,
                     float *search_items, IntType n, long *res_I, float *res_D,
                     IntType k, cudaStream_t s) {
  // Label translation offsets for merge_tables: exclusive prefix sum of
  // shard sizes. (A plain vector replaces the previous `new std::vector`,
  // which was never deleted — a leak; the tautological `if (i < n_params)`
  // guard inside the loop has also been removed.)
  std::vector<long> id_ranges;
  id_ranges.reserve(n_params);
  IntType total_n = 0;
  for (int i = 0; i < n_params; i++) {
    id_ranges.push_back(total_n);
    total_n += sizes[i];
  }

  // Host staging buffers. RAII vectors replace the raw new[] arrays that
  // were previously released with scalar `delete` (undefined behavior for
  // memory obtained from new[]), and remain exception-safe.
  std::vector<float> result_D(k * size_t(n));
  std::vector<long> result_I(k * size_t(n));
  std::vector<float> all_D(n_params * k * size_t(n));
  std::vector<long> all_I(n_params * k * size_t(n));

  ASSERT_DEVICE_MEM(search_items, "search items");
  ASSERT_DEVICE_MEM(res_I, "output index array");
  ASSERT_DEVICE_MEM(res_D, "output distance array");

  CUDA_CHECK(cudaStreamSynchronize(s));

#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < n_params; i++) {
      const float *ptr = input[i];
      IntType size = sizes[i];

      cudaPointerAttributes att;
      cudaError_t err = cudaPointerGetAttributes(&att, ptr);

      if (err == 0 && att.device > -1) {
        CUDA_CHECK(cudaSetDevice(att.device));
        CUDA_CHECK(cudaPeekAtLastError());

        try {
          faiss::gpu::StandardGpuResources gpu_res;

          cudaStream_t stream;
          CUDA_CHECK(cudaStreamCreate(&stream));

          gpu_res.noTempMemory();
          gpu_res.setCudaMallocWarning(false);
          gpu_res.setDefaultStream(att.device, stream);

          // Exact L2 search of this shard; results land in this shard's
          // slice of the stacked all_D / all_I buffers.
          faiss::gpu::bruteForceKnn(
            &gpu_res, faiss::METRIC_L2, ptr, true, size, search_items, true,
            n, D, k, all_D.data() + (long(i) * k * long(n)),
            all_I.data() + (long(i) * k * long(n)));

          CUDA_CHECK(cudaPeekAtLastError());
          CUDA_CHECK(cudaStreamSynchronize(stream));
          CUDA_CHECK(cudaStreamDestroy(stream));
        } catch (const std::exception &e) {
          std::cout << "Exception occurred: " << e.what() << std::endl;
        }
      } else {
        std::stringstream ss;
        ss << "Input memory for " << ptr
           << " failed. isDevice?=" << att.devicePointer << ", N=" << sizes[i];
        std::cout << "Exception: " << ss.str() << std::endl;
      }
    }
  }

  // k-way merge of the per-shard top-k lists into a single result.
  merge_tables<faiss::CMin<float, IntType>>(
    long(n), k, n_params, result_D.data(), result_I.data(), all_D.data(),
    all_I.data(), id_ranges.data());

  // Copy the merged host result back to the caller's device buffers.
  MLCommon::updateDevice(res_D, result_D.data(), k * size_t(n), s);
  MLCommon::updateDevice(res_I, result_I.data(), k * size_t(n), s);
}
}; // namespace Selection
}; // namespace MLCommon
|
backward_binary_reduce_impl.h | /*!
* Copyright (c) 2019 by Contributors
* \file kernel/cuda/backward_binary_reduce_impl.h
* \brief Minigun CPU UDFs for bacward binary reduce
*/
#ifndef DGL_KERNEL_CPU_BACKWARD_BINARY_REDUCE_IMPL_H_
#define DGL_KERNEL_CPU_BACKWARD_BINARY_REDUCE_IMPL_H_
#include <minigun/minigun.h>
#include "../binary_reduce_impl_decl.h"
#include "../utils.h"
#include "./functor.h"
#include "../csr_interface.h"
namespace dgl {
namespace kernel {
namespace cpu {
// Minigun CPU UDF computing the backward pass of a (non-broadcast) binary
// reduce: for each edge, recompute the forward edge value, chain the output
// gradient through the reducer, and atomically accumulate per-element
// gradients into the lhs and/or rhs gradient buffers (selected by Mode).
template <int Mode, typename Idx, typename DType, typename Functors>
struct BackwardBinaryReduce {
// Edge predicate: every edge participates in the backward pass.
static inline bool CondEdge(
Idx src, Idx dst, Idx eid, BackwardGData<Idx, DType>* gdata) {
return true;
}
// Per-edge backward computation.
static inline void ApplyEdge(
Idx src, Idx dst, Idx eid, BackwardGData<Idx, DType>* gdata) {
const int64_t D = gdata->x_length;    // feature length
const int64_t len = gdata->data_len;  // elements per feature entry
// Resolve which node/edge id supplies lhs, rhs and the output.
Idx lid = Functors::SelectLeft(src, eid, dst);
Idx rid = Functors::SelectRight(src, eid, dst);
Idx oid = Functors::SelectOut(src, eid, dst);
// Optional indirection through user-supplied id mappings.
if (gdata->lhs_mapping) {
lid = Functors::GetId(lid, gdata->lhs_mapping);
}
if (gdata->rhs_mapping) {
rid = Functors::GetId(rid, gdata->rhs_mapping);
}
if (gdata->out_mapping) {
oid = Functors::GetId(oid, gdata->out_mapping);
}
// Base offsets of the operand, output and gradient rows for this edge.
DType* lhsoff = gdata->lhs_data + lid * D * len;
DType* rhsoff = gdata->rhs_data + rid * D * len;
DType* outoff = gdata->out_data + oid * D;
DType* gradlhsoff = gdata->grad_lhs_data + lid * D * len;
DType* gradrhsoff = gdata->grad_rhs_data + rid * D * len;
DType* gradoutoff = gdata->grad_out_data + oid * D;
for (int64_t tx = 0; tx < D; ++tx) {
DType out = Functors::Read(outoff + tx);
DType grad_out = Functors::Read(gradoutoff + tx);
// Recompute the forward edge value, then chain the output gradient
// through the reducer's backward rule.
DType e = Functors::Op(lhsoff + tx * len, rhsoff + tx * len, len);
DType grad_e = grad_out * Functors::BackwardWrite(e, out);
DType* lhs_base = lhsoff + tx * len;
DType* rhs_base = rhsoff + tx * len;
if (Mode == binary_op::kGradBoth) {
// NOTE(review): the combined lhs+rhs gradient is accumulated into
// gradlhsoff only — presumably lhs and rhs refer to the same target
// in this mode; confirm against the caller.
for (int64_t i = 0; i < len; ++i) {
DType lhs = Functors::Read(lhs_base + i);
DType rhs = Functors::Read(rhs_base + i);
DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e);
DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e);
DType grad = grad_lhs + grad_rhs;
// Atomic: multiple edges may accumulate into the same slot.
#pragma omp atomic
gradlhsoff[tx * len + i] += grad;
}
} else if (Mode == binary_op::kGradLhs) {
for (int64_t i = 0; i < len; ++i) {
DType lhs = Functors::Read(lhs_base + i);
DType rhs = Functors::Read(rhs_base + i);
DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e);
#pragma omp atomic
gradlhsoff[tx * len + i] += grad_lhs;
}
} else if (Mode == binary_op::kGradRhs) {
for (int64_t i = 0; i < len; ++i) {
DType lhs = Functors::Read(lhs_base + i);
DType rhs = Functors::Read(rhs_base + i);
DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e);
#pragma omp atomic
gradrhsoff[tx * len + i] += grad_rhs;
}
}
}
}
};
// Minigun CPU UDF computing backward binary reduce with broadcasting:
// identical to BackwardBinaryReduce, except each output position is
// unraveled into coordinates and re-raveled under each operand's
// (broadcast) shape/stride to find the matching operand elements.
template <int Mode, int NDim,
typename Idx, typename DType, typename Functors>
struct BackwardBinaryReduceBcast {
// Edge predicate: every edge participates in the backward pass.
static inline bool CondEdge(
Idx src, Idx dst, Idx eid, BackwardBcastGData<NDim, Idx, DType>* gdata) {
return true;
}
// Per-edge backward computation with broadcasting.
static inline void ApplyEdge(
Idx src, Idx dst, Idx eid, BackwardBcastGData<NDim, Idx, DType>* gdata) {
const int64_t len = gdata->data_len;  // elements per feature entry
// Resolve which node/edge id supplies lhs, rhs and the output.
Idx lid = Functors::SelectLeft(src, eid, dst);
Idx rid = Functors::SelectRight(src, eid, dst);
Idx oid = Functors::SelectOut(src, eid, dst);
// Optional indirection through user-supplied id mappings.
if (gdata->lhs_mapping) {
lid = Functors::GetId(lid, gdata->lhs_mapping);
}
if (gdata->rhs_mapping) {
rid = Functors::GetId(rid, gdata->rhs_mapping);
}
if (gdata->out_mapping) {
oid = Functors::GetId(oid, gdata->out_mapping);
}
// Operand rows use their own lengths; gradient rows are laid out with
// the (broadcast) output length.
DType* lhsoff = gdata->lhs_data + lid * gdata->lhs_len * len;
DType* rhsoff = gdata->rhs_data + rid * gdata->rhs_len * len;
DType* outoff = gdata->out_data + oid * gdata->out_len;
DType* gradlhsoff = gdata->grad_lhs_data + lid * gdata->out_len * len;
DType* gradrhsoff = gdata->grad_rhs_data + rid * gdata->out_len * len;
DType* gradoutoff = gdata->grad_out_data + oid * gdata->out_len;
int64_t tmp[NDim]; // store unraveled idx.
for (int64_t tx = 0; tx < gdata->out_len; ++tx) {
// Map the flat output index to coordinates, then back to each
// operand's (possibly smaller, broadcast) flat offset.
Unravel(tx, gdata->ndim, gdata->out_shape, gdata->out_stride, tmp);
DType out = Functors::Read(outoff + tx);
DType grad_out = Functors::Read(gradoutoff + tx);
// Recompute the forward edge value and chain through the reducer.
DType e = Functors::Op(
lhsoff + Ravel(tmp, gdata->ndim, gdata->lhs_shape, gdata->lhs_stride) * len,
rhsoff + Ravel(tmp, gdata->ndim, gdata->rhs_shape, gdata->rhs_stride) * len,
len);
DType grad_e = grad_out * Functors::BackwardWrite(e, out);
DType* lhs_base = lhsoff +
Ravel(tmp, gdata->ndim, gdata->lhs_shape, gdata->lhs_stride) * len;
DType* rhs_base = rhsoff +
Ravel(tmp, gdata->ndim, gdata->rhs_shape, gdata->rhs_stride) * len;
if (Mode == binary_op::kGradBoth) {
// NOTE(review): combined gradient accumulated into gradlhsoff only —
// presumably lhs and rhs share a target in this mode; confirm.
for (int64_t i = 0; i < len; ++i) {
DType lhs = Functors::Read(lhs_base + i);
DType rhs = Functors::Read(rhs_base + i);
DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e);
DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e);
DType grad = grad_lhs + grad_rhs;
// Atomic: multiple edges may accumulate into the same slot.
#pragma omp atomic
gradlhsoff[tx * len + i] += grad;
}
} else if (Mode == binary_op::kGradLhs) {
for (int64_t i = 0; i < len; ++i) {
DType lhs = Functors::Read(lhs_base + i);
DType rhs = Functors::Read(rhs_base + i);
DType grad_lhs = grad_e * Functors::BackwardOpLhs(lhs, rhs, e);
#pragma omp atomic
gradlhsoff[tx * len + i] += grad_lhs;
}
} else if (Mode == binary_op::kGradRhs) {
for (int64_t i = 0; i < len; ++i) {
DType lhs = Functors::Read(lhs_base + i);
DType rhs = Functors::Read(rhs_base + i);
DType grad_rhs = grad_e * Functors::BackwardOpRhs(lhs, rhs, e);
#pragma omp atomic
gradrhsoff[tx * len + i] += grad_rhs;
}
}
}
}
};
// Auxiliary functor bundle used by the backward UDFs: thin inlined
// forwarders binding the target selectors, the binary op and the reducer
// into a single Functors type consumed by the structs above.
template <typename Idx, typename DType,
typename LeftSelector, typename RightSelector,
typename BinaryOp, typename Reducer>
struct BackwardFunctorsTempl {
// Output target id; src/dst are switched via SwitchSrcDst because the
// backward pass traverses the reverse (in) CSR.
static inline Idx SelectOut(
Idx src, Idx edge, Idx dst) {
typedef typename OutSelector<Reducer>::Type OutTarget;
return SwitchSrcDst<OutTarget>::Type::Call(src, edge, dst);
}
// Id supplying the lhs operand.
static inline Idx SelectLeft(
Idx src, Idx edge, Idx dst) {
return LeftSelector::Call(src, edge, dst);
}
// Id supplying the rhs operand.
static inline Idx SelectRight(
Idx src, Idx edge, Idx dst) {
return RightSelector::Call(src, edge, dst);
}
// Forward binary op over len contiguous elements.
static inline DType Op(DType* lhs, DType* rhs, int64_t len) {
return BinaryOp::Call(lhs, rhs, len);
}
// Plain load.
static inline DType Read(DType* addr) {
return *addr;
}
// Reduce val into *addr via the reducer.
static inline void Write(DType* addr, DType val) {
Reducer::Call(addr, val);
}
// Indirect id lookup through a mapping array.
static inline Idx GetId(Idx id, Idx* id_map) {
return *(id_map + id);
}
// Reducer's backward rule given the edge value and the accumulated output.
static inline DType BackwardWrite(DType val, DType accum) {
return Reducer::BackwardCall(val, accum);
}
// d(op)/d(lhs).
static inline DType BackwardOpLhs(DType lhs, DType rhs, DType out) {
return BinaryOp::BackwardLhs(lhs, rhs, out);
}
// d(op)/d(rhs).
static inline DType BackwardOpRhs(DType lhs, DType rhs, DType out) {
return BinaryOp::BackwardRhs(lhs, rhs, out);
}
};
typedef minigun::advance::Config<true, minigun::advance::kV2N> AdvanceConfig;
} // namespace cpu
// Template implementation of the BackwardBinaryReduce operator: builds the
// functor bundle, patches missing edge-data mappings, and launches the
// per-edge UDF over the graph via minigun::advance.
template <int XPU, int Mode, typename Idx, typename DType,
typename LeftSelector, typename RightSelector,
typename BinaryOp, typename Reducer>
void CallBackwardBinaryReduce(
const minigun::advance::RuntimeConfig& rtcfg,
const CSRWrapper& graph,
BackwardGData<Idx, DType>* gdata) {
// For backward computation, we use reverse csr and switch dst and src.
// This benefits the most common src_op_edge or copy_src case, because the
// gradients of src are now aggregated into destination buffer to reduce
// competition of atomic add.
auto incsr = graph.GetInCSRMatrix();
minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(incsr.indptr, incsr.indices);
typedef cpu::BackwardFunctorsTempl<Idx, DType,
typename SwitchSrcDst<LeftSelector>::Type,
typename SwitchSrcDst<RightSelector>::Type,
BinaryOp, Reducer> Functors;
typedef cpu::BackwardBinaryReduce<Mode, Idx, DType, Functors> UDF;
// If the user-given mapping is none and the target is edge data, we need to
// replace the mapping by the edge ids in the csr graph so that the edge
// data is correctly read/written.
if (LeftSelector::target == binary_op::kEdge
&& gdata->lhs_mapping == nullptr) {
gdata->lhs_mapping = static_cast<Idx*>(incsr.data->data);
}
if (RightSelector::target == binary_op::kEdge
&& gdata->rhs_mapping == nullptr) {
gdata->rhs_mapping = static_cast<Idx*>(incsr.data->data);
}
if (OutSelector<Reducer>::Type::target == binary_op::kEdge
&& gdata->out_mapping == nullptr) {
gdata->out_mapping = static_cast<Idx*>(incsr.data->data);
}
// TODO(minjie): allocator
minigun::advance::Advance<XPU, Idx, cpu::AdvanceConfig, BackwardGData<Idx, DType>, UDF>(
rtcfg, csr, gdata, minigun::IntArray1D<Idx>());
}
// Following macro is used to generate explicit-specialization of the template
// operator.
#define GEN_BACKWARD_DEFINE(mode, dtype, lhs_tgt, rhs_tgt, op) \
template void CallBackwardBinaryReduce<XPU, \
mode, IDX, dtype, \
lhs_tgt, rhs_tgt, \
op<dtype>, REDUCER<XPU, dtype>>( \
const minigun::advance::RuntimeConfig& rtcfg, \
const CSRWrapper& graph, \
BackwardGData<IDX, dtype>* gdata);
// Template implementation of BackwardBinaryReduce with broadcasting: same
// dispatch as CallBackwardBinaryReduce, but instantiates the broadcasting
// UDF and carries the NDim broadcast shape information in gdata.
template <int XPU, int Mode, int NDim, typename Idx, typename DType,
typename LeftSelector, typename RightSelector,
typename BinaryOp, typename Reducer>
void CallBackwardBinaryReduceBcast(
const minigun::advance::RuntimeConfig& rtcfg,
const CSRWrapper& graph,
BackwardBcastGData<NDim, Idx, DType>* gdata) {
// For backward computation, we use reverse csr and switch dst and src.
// This benefits the most common src_op_edge or copy_src case, because the
// gradients of src are now aggregated into destination buffer to reduce
// competition of atomic add.
auto incsr = graph.GetInCSRMatrix();
minigun::Csr<Idx> csr = utils::CreateCsr<Idx>(incsr.indptr, incsr.indices);
typedef cpu::BackwardFunctorsTempl<Idx, DType,
typename SwitchSrcDst<LeftSelector>::Type,
typename SwitchSrcDst<RightSelector>::Type,
BinaryOp, Reducer> Functors;
typedef cpu::BackwardBinaryReduceBcast<Mode, NDim, Idx, DType, Functors> UDF;
// If the user-given mapping is none and the target is edge data, we need to
// replace the mapping by the edge ids in the csr graph so that the edge
// data is correctly read/written.
if (LeftSelector::target == binary_op::kEdge
&& gdata->lhs_mapping == nullptr) {
gdata->lhs_mapping = static_cast<Idx*>(incsr.data->data);
}
if (RightSelector::target == binary_op::kEdge
&& gdata->rhs_mapping == nullptr) {
gdata->rhs_mapping = static_cast<Idx*>(incsr.data->data);
}
if (OutSelector<Reducer>::Type::target == binary_op::kEdge
&& gdata->out_mapping == nullptr) {
gdata->out_mapping = static_cast<Idx*>(incsr.data->data);
}
// TODO(minjie): allocator
minigun::advance::Advance<XPU, Idx, cpu::AdvanceConfig,
BackwardBcastGData<NDim, Idx, DType>, UDF>(
rtcfg, csr, gdata, minigun::IntArray1D<Idx>());
}
// Following macro is used to generate explicit-specialization of the template
// operator.
#define GEN_BACKWARD_BCAST_DEFINE(mode, ndim, dtype, lhs_tgt, rhs_tgt, op) \
template void CallBackwardBinaryReduceBcast<XPU, \
mode, ndim, IDX, dtype, \
lhs_tgt, rhs_tgt, \
op<dtype>, REDUCER<XPU, dtype>>( \
const minigun::advance::RuntimeConfig& rtcfg, \
const CSRWrapper& graph, \
BackwardBcastGData<ndim, IDX, dtype>* gdata);
} // namespace kernel
} // namespace dgl
#endif // DGL_KERNEL_CPU_BACKWARD_BINARY_REDUCE_IMPL_H_
|
GB_binop__isne_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint8)
// A*D function (colscale): GB (_AxD__isne_uint8)
// D*A function (rowscale): GB (_DxB__isne_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint8)
// C=scalar+B GB (_bind1st__isne_uint8)
// C=scalar+B' GB (_bind1st_tran__isne_uint8)
// C=A+scalar GB (_bind2nd__isne_uint8)
// C=A'+scalar GB (_bind2nd_tran__isne_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT8 || GxB_NO_ISNE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (Compiled out: ISNE is not one of those ops, so no dense ewise3-accum
// kernel exists for it; this stub is kept by the code generator.)
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Thin wrapper: the loop body lives in GB_dense_ewise3_noaccum_template.c,
// specialized by the GB_* macros above for the ISNE_UINT8 operator.
// Returns GrB_NO_VALUE when the operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Dense subassign, method 23 (see GB_dense_subassign_23_template.c).
// B_ek_slicing / B_ntasks / B_nthreads describe how B's entries were sliced
// into parallel tasks by the caller.
GrB_Info GB (_Cdense_accumB__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Dense subassign, method 22 (see GB_dense_subassign_22_template.c).
// p_bwork points to the scalar b, already typecast to uint8_t by the caller.
// Fix: the original had a second, unreachable "return (GrB_SUCCESS)" after
// the inner block's return; the duplicate inside the block is removed.
GrB_Info GB (_Cdense_accumb__isne_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Kernel body in GB_AxB_colscale_template.c, writing through the typed
// pointer Cx declared here.  A_is_pattern / D_is_pattern presumably tell the
// template the corresponding values are ignored — confirm in the template.
GrB_Info GB (_AxD__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of GB_AxD above, but scaling rows of B by D; kernel body in
// GB_AxB_rowscale_template.c.
GrB_Info GB (_DxB__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// M is the optional mask (Mask_comp complements it, Mask_struct takes it as
// structural).  The GB_WERK_DECLARE workspaces are allocated inside the
// template and released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__isne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08 (see GB_emult_08_meta.c); ewise_method selects the
// variant inside the meta-template.
GrB_Info GB (_AemultB_08__isne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02.  GB_BINOP_FLIP is 0 for this operator (see macro
// above), so only the "#else" branch below is compiled: flipxy needs no
// special handling and GB_FLIPPED is fixed at 0.
GrB_Info GB (_AemultB_02__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04 (see GB_emult_04_template.c); the mask M drives the
// iteration, sliced by M_ek_slicing.
GrB_Info GB (_AemultB_04__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-output eWiseMult (see GB_bitmap_emult_template.c).
GrB_Info GB (_AemultB_bitmap__isne_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x != Bx [p]) for every entry p present in B, where x is
// a scalar bound as the first operand.  Entries absent from the bitmap Bb
// are left untouched.
GrB_Info GB (_bind1st__isne_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the output, the values of B, and the bound scalar x
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // apply the op only where the bitmap says an entry exists
        if (GBB (Bb, p))
        {
            uint8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x != bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] != y) for every entry p present in A, where y is
// a scalar bound as the second operand.  Entries absent from the bitmap Ab
// are left untouched.
GrB_Info GB (_bind2nd__isne_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the output, the values of A, and the bound scalar y
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where the bitmap says an entry exists
        if (GBB (Ab, p))
        {
            uint8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is what GB_unop_transpose.c invokes per entry; it is redefined
// here so the transpose template computes (x != aij) with the bound scalar x.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code that follows; this runs at preprocessing time,
// so its position after the returns is irrelevant
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined again so the transpose template computes
// (aij != y) with the bound scalar y as the second operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
/***********************************************************************
* Module: dim3_sweep.c
*
* This module contains the 2D and 3D mesh sweep logic.
***********************************************************************/
#include "snap.h"
// Local variable array macro
// 1-D per-angle views of the scratch arrays psi, pc, den local to dim3_sweep
#define PSI_1D(ANG) psi[ANG]
#define PC_1D(ANG) pc[ANG]
#define DEN_1D(ANG) den[ANG]
// hv and fxhv hold 4 edge values per angle; ROWORDER selects angle-major
// (ANG*4 + X) versus edge-major (X*NANG + ANG) storage
#ifdef ROWORDER
#define HV_2D(ANG, X) hv[ ANG*4 \
+ X ]
#else
#define HV_2D(ANG, X) hv[ X*NANG \
+ ANG ]
#endif
#ifdef ROWORDER
#define FXHV_2D(ANG, X) fxhv[ ANG*4 \
+ X ]
#else
#define FXHV_2D(ANG, X) fxhv[ X*NANG \
+ ANG ]
#endif
// Simplify array indexing when certain values constant throughout module
// The group g, octant oct, and time-copy indices i1/i2 are fixed for one
// call of dim3_sweep, so bind them here (all converted to 0-based with -1)
#define PSII_3D(ANG, Y, Z) PSII_4D(ANG, Y, Z, (g-1))
#define PSIJ_3D(ANG, CHUNK, Z) PSIJ_4D(ANG, CHUNK, Z, (g-1))
#define PSIK_3D(ANG, CHUNK, Y) PSIK_4D(ANG, CHUNK, Y, (g-1))
#define QTOT_4D(MOM1, X, Y, Z) QTOT_5D(MOM1, X, Y, Z, (g-1))
#define EC_2D(ANG, MOM1) EC_3D(ANG, MOM1, (oct-1))
#define VDELT_CONST VDELT_1D(g-1)
#define PTR_IN_4D(ANG, X, Y, Z) PTR_IN_6D(ANG, X, Y, Z, (i1-1), (i2-1))
#define PTR_OUT_4D(ANG, X, Y, Z) PTR_OUT_6D(ANG, X, Y, Z, (i1-1), (i2-1))
#define DINV_4D(ANG, X, Y, Z) DINV_5D(ANG, X, Y, Z, (g-1))
#define FLUX_3D(X, Y, Z) FLUX_4D(X, Y, Z, (g-1))
#define FLUXM_4D(MOM1, X, Y, Z) FLUXM_5D(MOM1, X, Y, Z, (g-1))
#define JB_IN_3D(ANG, CHUNK, Z) JB_IN_4D(ANG, CHUNK, Z, (g-1))
#define JB_OUT_3D(ANG, CHUNK, Z) JB_OUT_4D(ANG, CHUNK, Z, (g-1))
#define KB_IN_3D(ANG, CHUNK, Y) KB_IN_4D(ANG, CHUNK, Y, (g-1))
#define KB_OUT_3D(ANG, CHUNK, Y) KB_OUT_4D(ANG, CHUNK, Y, (g-1))
#define FLKX_3D(X, Y, Z) FLKX_4D(X, Y, Z, (g-1))
#define FLKY_3D(X, Y, Z) FLKY_4D(X, Y, Z, (g-1))
#define FLKZ_3D(X, Y, Z) FLKZ_4D(X, Y, Z, (g-1))
#define T_XS_3D(X, Y, Z) T_XS_4D(X, Y, Z, (g-1))
// Reset the sweep-data state to zero.
// FMIN and FMAX are macros that presumably expand to fields of
// *dim_sweep_vars (declared in snap.h) — TODO confirm against the header.
void dim3_sweep_data_init ( dim_sweep_data *dim_sweep_vars )
{
FMIN = 0;
FMAX = 0;
}
/***********************************************************************
* 3-D slab mesh sweeper.
***********************************************************************/
void dim3_sweep ( input_data *input_vars, para_data *para_vars,
geom_data *geom_vars, sn_data *sn_vars,
data_data *data_vars, control_data *control_vars,
solvar_data *solvar_vars, dim_sweep_data *dim_sweep_vars,
int ich, int id, int d1, int d2, int d3, int d4, int jd,
int kd, int jlo, int klo, int jhi, int khi, int jst, int kst,
int i1, int i2, int oct, int g, int *ierr )
{
/***********************************************************************
* Local variables
***********************************************************************/
int ist, d, n, ic, i, j, k, l, ibl, ibr, ibb, ibt, ibf, ibk;
int z_ind, y_ind, ic_ind, ang, indx1 = 4;
double sum_hv = 0, sum_hv_tmp = 0, sum_wpsi = 0, sum_ecwpsi = 0,
sum_wmupsii = 0, sum_wetapsij = 0, sum_wxipsik = 0;
double psi[NANG], pc[NANG], den[NANG];
double hv[NANG*4], fxhv[NANG*4];
double vec1_vec2_tmp[NANG], PSI_2X[NANG], hv_p1[NANG*4],
mu_hv[NANG], hj_hv[NANG], hk_hv[NANG], w_psi[NANG];
double unit_vec[indx1];
for ( i = 0; i < indx1; i++ )
{
unit_vec[i] = 1;
}
#ifdef USEVML
double PC_2X[NANG];
double PSII_HI_tmp[NANG], PSII_MU_HI_tmp[NANG],
PSIJ_HJ_tmp[NANG], PSIK_HK_tmp[NANG],
PSII_PSIJ_tmp[NANG], PSII_PSIJ_PSIK_tmp[NANG];
#endif
/***********************************************************************
* Set up the sweep order in the i-direction.
***********************************************************************/
ist = -1;
if ( id == 2 ) ist = 1;
/***********************************************************************
* Zero out the outgoing boundary arrays and fixup array
***********************************************************************/
for ( z_ind = 0; z_ind < NZ; z_ind++ )
{
for ( ic_ind = 0; ic_ind < ICHUNK; ic_ind++ )
{
for ( ang = 0; ang < NANG; ang++ )
{
JB_OUT_3D(ang,ic_ind,z_ind) = 0;
}
}
}
for ( y_ind = 0; y_ind < NY; y_ind++ )
{
for ( ic_ind = 0; ic_ind < ICHUNK; ic_ind++ )
{
for ( ang = 0; ang < NANG; ang++ )
{
KB_OUT_3D(ang,ic_ind,y_ind) = 0;
}
}
}
for ( i = 0; i < 4; i++)
{
for ( ang = 0; ang < NANG; ang++ )
{
FXHV_2D(ang, i) = 0;
}
}
/***********************************************************************
* Loop over cells along the diagonals. When only 1 diagonal, it's
* normal sweep order. Otherwise, nested threading performs mini-KBA.
***********************************************************************/
/***********************************************************************
* Commented out all nested OMP statements because not all compilers support
* these put them back in if you want.
***********************************************************************/
// #pragma omp parallel num_threads(NNESTED) default(shared) firstprivate(fxhv)
// {
// #endif
// diagonal loop
for ( d = 1; d <= NDIAG; d++ )
{
#pragma omp for schedule(static, 1) private(n,ic,i,j,k,l,psi,pc,sum_hv,hv,den)
// line_loop
for ( n = 1; n <= (DIAG_1D(d-1).lenc); n++ )
{
ic = DIAG_1D(d-1).cell_id_vars[n-1].ic;
if ( ist < 0 )
{
i = ich*ICHUNK - ic + 1;
}
else
{
i = (ich-1)*ICHUNK + ic;
}
if ( i <= NX )
{
j = DIAG_1D(d-1).cell_id_vars[n-1].jc;
if ( jst < 0 )
{
j = NY - j + 1;
}
k = DIAG_1D(d-1).cell_id_vars[n-1].kc;
if ( kst < 0 )
{
k = NZ - k + 1;
}
/***********************************************************************
* Left/right boundary conditions, always vacuum.
***********************************************************************/
ibl = 0;
ibr = 0;
if ( (i == NX) && (ist == -1) )
{
for ( ang = 0; ang < NANG; ang++ )
{
PSII_3D(ang,(j-1),(k-1)) = 0;
}
}
else if ( i == 1 && ist == 1 )
{
switch ( ibl )
{
case 0:
for ( ang = 0; ang < NANG; ang++ )
{
PSII_3D(ang,(j-1),(k-1)) = 0;
}
case 1:
for ( ang = 0; ang < NANG; ang++ )
{
PSII_3D(ang,(j-1),(k-1)) = 0;
}
}
}
/***********************************************************************
* Top/bottom boundary condtions. Vacuum at global boundaries, but
* set to some incoming flux from neighboring proc.
***********************************************************************/
ibb = 0;
ibt = 0;
if ( j == jlo )
{
if ( jd == 1 && LASTY )
{
for ( ang = 0; ang < NANG; ang++ )
{
PSIJ_3D(ang,(ic-1),(k-1)) = 0;
}
}
else if ( jd == 2 && FIRSTY )
{
switch ( ibb )
{
case 0:
for ( ang = 0; ang < NANG; ang++ )
{
PSIJ_3D(ang,(ic-1),(k-1)) = 0;
}
case 1:
for ( ang = 0; ang < NANG; ang++ )
{
PSIJ_3D(ang,(ic-1),(k-1)) = 0;
}
}
}
else
{
#ifdef USEMKL
cblas_dcopy(NANG, &JB_IN_3D(0,(ic-1),(k-1)), 1,
&PSIJ_3D(0,(ic-1),(k-1)), 1);
#elif defined USEBLAS
dcopy(NANG, &JB_IN_3D(0,(ic-1),(k-1)), 1,
&PSIJ_3D(0,(ic-1),(k-1)), 1);
#else
for ( ang = 0; ang < NANG; ang++ )
{
PSIJ_3D(ang,(ic-1),(k-1))
= JB_IN_3D(ang,(ic-1),(k-1));
}
#endif
}
}
/***********************************************************************
* Front/back boundary condtions. Vacuum at global boundaries, but
* set to some incoming flux from neighboring proc.
***********************************************************************/
ibf = 0;
ibk = 0;
if ( k == klo )
{
if ( (kd == 1 && LASTZ) || NDIMEN < 3 )
{
for ( ang = 0; ang < NANG; ang++ )
{
PSIK_3D(ang,(ic-1),(j-1)) = 0;
}
}
else if ( kd == 2 && FIRSTZ )
{
switch ( ibf )
{
case 0:
for ( ang = 0; ang < NANG; ang++ )
{
PSIK_3D(ang,(ic-1),(j-1)) = 0;
}
case 1:
for ( ang = 0; ang < NANG; ang++ )
{
PSIK_3D(ang,(ic-1),(j-1)) = 0;
}
}
}
else
{
#ifdef USEMKL
cblas_dcopy(NANG, &KB_IN_3D(0,(ic-1),(j-1)), 1,
&PSIK_3D(0,(ic-1),(j-1)), 1);
#elif defined USEBLAS
dcopy(NANG, &KB_IN_3D(0,(ic-1),(j-1)), 1,
&PSIK_3D(0,(ic-1),(j-1)), 1);
#else
for ( ang = 0; ang < NANG; ang++ )
{
PSIK_3D(ang,(ic-1),(j-1))
= KB_IN_3D(ang,(ic-1),(j-1));
}
#endif
}
}
/***********************************************************************
* Compute the angular source
***********************************************************************/
#ifdef USEMKL
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang) = QTOT_4D(0,(i-1),(j-1),(k-1));
}
if ( SRC_OPT == 3 )
{
cblas_daxpy(NANG, 1,
&QIM_6D(0,(i-1),(j-1),(k-1),(oct-1),(g-1)),
1, psi, 1);
}
cblas_dgemv(CblasColMajor, CblasNoTrans, NANG, (CMOM-1), 1,
&EC_2D(0,1), NANG, &QTOT_4D(1,(i-1),(j-1),(k-1)),
1, 1, psi, 1);
#else
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang) = QTOT_4D(0,(i-1),(j-1),(k-1));
if ( SRC_OPT == 3 )
{
PSI_1D(ang) +=
QIM_6D(ang,(i-1),(j-1),(k-1),(oct-1),(g-1));
}
}
for ( l = 2; l <=CMOM; l++ )
{
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang) +=
EC_2D(ang,(l-1))
*QTOT_4D((l-1),(i-1),(j-1),(k-1));
}
}
#endif
/***********************************************************************
* Compute the numerator for the update formula
***********************************************************************/
#ifdef USEMKL
#ifdef USEVML_unchecked
// Use VML for vector lengths exceeding certain length
if ( NANG > VECLEN_MIN )
{
#ifdef MKLUPDATE
cblas_dcopy(NANG, psi, 1, pc, 1);
vmdMul(NANG, &PSII_3D(0,(j-1),(k-1)), MU, vec1_vec2_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
cblas_daxpy(NANG, HI, vec1_vec2_tmp, 1, pc, 1);
vmdMul(NANG, &PSIJ_3D(0,(ic-1),(k-1)), HJ, vec1_vec2_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
cblas_daxpy(NANG, 1, vec1_vec2_tmp, 1, pc, 1);
vmdMul(NANG, &PSIK_3D(0,(ic-1),(j-1)), HK, vec1_vec2_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
cblas_daxpy(NANG, 1, vec1_vec2_tmp, 1, pc, 1);
#else
vmdMul(NANG, &PSII_3D(0,(j-1),(k-1)), MU, PSII_MU_HI_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
cblas_dscal(NANG, HI, PSII_MU_HI_tmp, 1);
vmdMul(NANG, &PSIJ_3D(0,(ic-1),(k-1)), HJ, PSIJ_HJ_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdMul(NANG, &PSIK_3D(0,(ic-1),(k-1)), HK, PSIK_HK_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdAdd(NANG, PSIK_HK_tmp, PSIJ_HJ_tmp, PSIJ_HJ_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdAdd(NANG, PSII_MU_HI_tmp, PSIJ_HJ_tmp, PSIJ_HJ_tmp,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdAdd(NANG, psi, PSIJ_HJ_tmp, pc,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
#endif
}
else
{
#ifdef MKLUPDATE
cblas_dcopy(NANG, psi, 1, pc, 1);
cblas_dsbmv(CblasColMajor, CblasLower, NANG, 0, HI,
&PSII_3D(0,(j-1),(k-1)), 1, MU, 1, 1, pc, 1);
cblas_dsbmv(CblasColMajor, CblasLower, NANG, 0, 1,
&PSIJ_3D(0,(ic-1),(k-1)), 1, HJ, 1, 1, pc, 1);
cblas_dsbmv(CblasColMajor, CblasLower, NANG, 0, 1,
&PSIK_3D(0,(ic-1),(j-1)), 1, HK, 1, 1, pc, 1);
#else
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PSI_1D(ang)
+ PSII_3D(ang,(j-1),(k-1)) *MU_1D(ang)*HI
+ PSIJ_3D(ang,(ic-1),(k-1))*HJ_1D(ang)
+ PSIK_3D(ang,(ic-1),(j-1))*HK_1D(ang);
}
#endif
}
if ( VDELT_CONST != 0 )
{
cblas_daxpy(NANG, VDELT_CONST,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)), 1, pc, 1);
}
#else
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PSI_1D(ang)
+ PSII_3D(ang,(j-1),(k-1)) *MU_1D(ang)*HI
+ PSIJ_3D(ang,(ic-1),(k-1))*HJ_1D(ang)
+ PSIK_3D(ang,(ic-1),(j-1))*HK_1D(ang);
}
if ( VDELT_CONST != 0 )
{
cblas_daxpy(NANG, VDELT_CONST,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)), 1, pc, 1);
}
#endif
#else
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PSI_1D(ang)
+ PSII_3D(ang,(j-1),(k-1)) *MU_1D(ang)*HI
+ PSIJ_3D(ang,(ic-1),(k-1))*HJ_1D(ang)
+ PSIK_3D(ang,(ic-1),(j-1))*HK_1D(ang);
if ( VDELT_CONST != 0 )
{
PC_1D(ang) += VDELT_CONST
*PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1));
}
}
#endif
/***********************************************************************
* Compute the solution of the center. Use DD for edges. Use fixup
* if requested.
***********************************************************************/
if ( FIXUP == 0 )
{
//#ifdef USEMKL
#ifdef USEVML
if ( NANG > VECLEN_MIN )
{
vmdMul(NANG, pc, &DINV_4D(0,(i-1),(j-1),(k-1)), psi,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
for ( ang = 0; ang < NANG; ang++ )
{
PSI_2X[ang] = 2*PSI_1D(ang);
}
vmdSub(NANG, PSI_2X, &PSII_3D(0, (j-1), (k-1)),
&PSII_3D(0, (j-1), (k-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdSub(NANG, PSI_2X, &PSIJ_3D(0, (ic-1), (k-1)),
&PSIJ_3D(0, (ic-1), (k-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
if ( NDIMEN == 3)
{
vmdSub(NANG, PSI_2X, &PSIK_3D(0, (ic-1), (j-1)),
&PSIK_3D(0, (ic-1), (j-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
}
if ( VDELT_CONST != 0 )
{
vmdSub(NANG, PSI_2X, &PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)),
&PTR_OUT_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
}
}
else
{
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang)
= PC_1D(ang)*DINV_4D(ang,(i-1),(j-1),(k-1));
PSII_3D(ang,(j-1),(k-1))
= 2*PSI_1D(ang) - PSII_3D(ang,(j-1),(k-1));
PSIJ_3D(ang,(ic-1),(k-1))
= 2*PSI_1D(ang) - PSIJ_3D(ang,(ic-1),(k-1));
if ( NDIMEN == 3 )
{
PSIK_3D(ang,(ic-1),(j-1))
= 2*PSI_1D(ang) - PSIK_3D(ang,(ic-1),(j-1));
}
if ( VDELT_CONST != 0 )
{
PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
= 2*PSI_1D(ang)
- PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1));
}
}
}
/* #else
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang)
= PC_1D(ang)*DINV_4D(ang,(i-1),(j-1),(k-1));
}
cblas_dscal(NANG, -1, &PSII_3D(0, (j-1), (k-1)), 1);
cblas_daxpy(NANG, 2, psi, 1, &PSII_3D(0, (j-1), (k-1)), 1);
cblas_dscal(NANG, -1, &PSIJ_3D(0, (ic-1), (k-1)), 1);
cblas_daxpy(NANG, 2, psi, 1, &PSIJ_3D(0, (ic-1), (k-1)), 1);
if ( NDIMEN == 3 )
{
cblas_dscal(NANG, -1, &PSIK_3D(0, (ic-1), (j-1)), 1);
cblas_daxpy(NANG, 2, psi, 1, &PSIK_3D(0, (ic-1), (j-1)), 1);
}
if ( VDELT_CONST != 0 )
{
cblas_dscal(NANG, -1,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)), 1);
cblas_daxpy(NANG, 2, psi, 1,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)), 1);
}
#endif
*/
#elif defined MKLUPDATE
//cblas_dsbmv(CblasColMajor, CblasLower, NANG, 0, 1,
// &DINV_4D(0,(i-1),(j-1),(k-1)), 1, pc, 1, 0, psi, 1);
vmdMul(NANG, pc, &DINV_4D(0,(i-1),(j-1),(k-1)), psi,
VML_ACCURACY | VML_HANDLING | VML_ERROR);
cblas_dscal(NANG, -1, &PSII_3D(0, (j-1), (k-1)), 1);
cblas_daxpy(NANG, 2, psi, 1, &PSII_3D(0, (j-1), (k-1)), 1);
cblas_dscal(NANG, -1, &PSIJ_3D(0, (ic-1), (k-1)), 1);
cblas_daxpy(NANG, 2, psi, 1, &PSIJ_3D(0, (ic-1), (k-1)), 1);
if ( NDIMEN == 3 )
{
cblas_dscal(NANG, -1, &PSIK_3D(0, (ic-1), (j-1)), 1);
cblas_daxpy(NANG, 2, psi, 1, &PSIK_3D(0, (ic-1), (j-1)), 1);
}
if ( VDELT_CONST != 0 )
{
cblas_dscal(NANG, -1,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)), 1);
cblas_daxpy(NANG, 2, psi, 1,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)), 1);
}
#else
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang)
= PC_1D(ang)*DINV_4D(ang,(i-1),(j-1),(k-1));
PSII_3D(ang,(j-1),(k-1))
= 2*PSI_1D(ang) - PSII_3D(ang,(j-1),(k-1));
PSIJ_3D(ang,(ic-1),(k-1))
= 2*PSI_1D(ang) - PSIJ_3D(ang,(ic-1),(k-1));
if ( NDIMEN == 3 )
{
PSIK_3D(ang,(ic-1),(j-1))
= 2*PSI_1D(ang) - PSIK_3D(ang,(ic-1),(j-1));
}
if ( VDELT_CONST != 0 )
{
PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
= 2*PSI_1D(ang)
- PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1));
}
}
#endif
}
else
{
/***********************************************************************
* Multi-pass set to zero + rebalance fixup. Determine angles
* that will need fixup first.
***********************************************************************/
sum_hv = 0;
#ifdef USEMKL
#ifdef USEVML
for ( indx1 = 0; indx1 < 4; indx1++ )
{
for ( ang = 0; ang < NANG; ang++ )
{
HV_2D(ang, indx1) = 1;
}
}
sum_hv = cblas_dasum(NANG*4, hv, 1);
vmdMul( NANG, pc, &DINV_4D(0,(i-1),(j-1),(k-1)), pc,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
#else
for ( indx1 = 0; indx1 < 4; indx1++ )
{
for ( ang = 0; ang < NANG; ang++ )
{
HV_2D(ang, indx1) = 1;
}
}
sum_hv = cblas_dasum(NANG*4, hv, 1);
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PC_1D(ang)
* DINV_4D(ang,(i-1),(j-1),(k-1));
}
#endif
#else
for ( ang = 0; ang < NANG; ang++ )
{
for ( indx1 = 0; indx1 < 4; indx1++ )
{
HV_2D(ang, indx1) = 1;
sum_hv += HV_2D(ang,indx1);
}
PC_1D(ang) = PC_1D(ang)
* DINV_4D(ang,(i-1),(j-1),(k-1));
}
#endif
// fixup_loop
while (true)
{
sum_hv_tmp = 0;
#ifdef USEMKL
#ifdef USEVML
if (NANG > VECLEN_MIN)
{
for (ang = 0; ang < NANG; ang++ )
{
PC_2X[ang] = 2*PC_1D(ang);
}
vmdSub(NANG, PC_2X, &PSII_3D(0,(j-1),(k-1)),
&FXHV_2D(0,0),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdSub(NANG, PC_2X, &PSIJ_3D(0,(ic-1),(k-1)),
&FXHV_2D(0,1),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
if ( NDIMEN == 3 )
{
vmdSub(NANG, PC_2X, &PSIK_3D(0,(ic-1),(j-1)),
&FXHV_2D(0,2),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
}
if ( VDELT_CONST != 0 )
{
vmdSub(NANG, PC_2X,
&PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)),
&FXHV_2D(0,3),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
}
for ( indx1 = 0; indx1 < 4; indx1++ )
{
for (ang = 0; ang < NANG; ang++ )
{
if ( FXHV_2D(ang,indx1) < 0 )
{
HV_2D(ang,indx1) = 0;
}
}
}
sum_hv_tmp = cblas_dasum(NANG*4, hv, 1);
}
else
{
for ( ang = 0; ang < NANG; ang++ )
{
FXHV_2D(ang,0) = 2*PC_1D(ang)
- PSII_3D(ang,(j-1),(k-1));
FXHV_2D(ang,1) = 2*PC_1D(ang)
- PSIJ_3D(ang,(ic-1),(k-1));
if ( NDIMEN == 3 )
{
FXHV_2D(ang,2) = 2*PC_1D(ang)
- PSIK_3D(ang,(ic-1),(j-1));
}
if ( VDELT_CONST != 0 )
{
FXHV_2D(ang,3) = 2*PC_1D(ang)
- PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1));
}
for ( indx1 = 0; indx1 < 4; indx1++ )
{
if ( FXHV_2D(ang,indx1) < 0 )
{
HV_2D(ang,indx1) = 0;
}
sum_hv_tmp += HV_2D(ang,indx1);
}
}
}
#else
unit_vec[0] = 1;
unit_vec[1] = 1;
unit_vec[2] = 0;
unit_vec[3] = 0;
cblas_dcopy(NANG, &PSII_3D(0,(j-1),(k-1)),
1, &FXHV_2D(0,0), 1);
cblas_dcopy(NANG, &PSIJ_3D(0,(ic-1),(k-1)),
1, &FXHV_2D(0,1), 1);
cblas_dscal(NANG*2, -1, &FXHV_2D(0,0), 1);
if ( NDIMEN == 3 )
{
cblas_dcopy(NANG, &PSIK_3D(0,(ic-1),(j-1)),
1, &FXHV_2D(0,2), 1);
cblas_dscal(NANG, -1, &FXHV_2D(0,2), 1);
unit_vec[2] = 1;
}
if ( VDELT_CONST != 0 )
{
cblas_dcopy(NANG, &PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)),
1, &FXHV_2D(0,3), 1);
cblas_dscal(NANG, -1, &FXHV_2D(0,3), 1);
unit_vec[3] = 1;
}
cblas_dger(CblasColMajor, NANG, 4, 2, pc, 1, unit_vec, 1, fxhv, NANG);
for ( indx1 = 0; indx1 < 4; indx1++ )
{
for ( ang = 0; ang < NANG; ang++ )
{
if ( FXHV_2D(ang,indx1) < 0 )
{
HV_2D(ang,indx1) = 0;
}
}
}
sum_hv_tmp = cblas_dasum(NANG*4, hv, 1);
#endif
#else
for ( ang = 0; ang < NANG; ang++ )
{
FXHV_2D(ang,0) = 2*PC_1D(ang)
- PSII_3D(ang,(j-1),(k-1));
FXHV_2D(ang,1) = 2*PC_1D(ang)
- PSIJ_3D(ang,(ic-1),(k-1));
if ( NDIMEN == 3 )
{
FXHV_2D(ang,2) = 2*PC_1D(ang)
- PSIK_3D(ang,(ic-1),(j-1));
}
if ( VDELT_CONST != 0 )
{
FXHV_2D(ang,3) = 2*PC_1D(ang)
- PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1));
}
for ( indx1 = 0; indx1 < 4; indx1++ )
{
if ( FXHV_2D(ang,indx1) < 0 )
{
HV_2D(ang,indx1) = 0;
}
sum_hv_tmp += HV_2D(ang,indx1);
}
}
#endif
/***********************************************************************
* Exit loop when all angles are fixed up
***********************************************************************/
if ( sum_hv == sum_hv_tmp ) break;
sum_hv = sum_hv_tmp;
/***********************************************************************
* Recompute balance equation numerator and denominator and get
* new cell average flux
***********************************************************************/
#ifdef USEVML
if (NANG > VECLEN_MIN)
{
cblas_dcopy(NANG*4, hv, 1, hv_p1, 1);
for ( ang = 0; ang < NANG*4; ang++ )
{
hv_p1[ang] += 1;
}
vmdMul(NANG, MU, &hv_p1[NANG*0], &hv_p1[NANG*0],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdMul(NANG, &PSII_3D(0,(j-1),(k-1)),
&hv_p1[NANG*0], &hv_p1[NANG*0],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
cblas_dscal(NANG, HI, &hv_p1[NANG*0], 1);
vmdMul(NANG, HJ, &hv_p1[NANG*1], &hv_p1[NANG*1],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdMul(NANG, &PSIJ_3D(0,(ic-1),(k-1)),
&hv_p1[NANG*1], &hv_p1[NANG*1],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdMul(NANG, HK, &hv_p1[NANG*2], &hv_p1[NANG*2],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdMul(NANG, &PSIK_3D(0,(ic-1),(j-1)),
&hv_p1[NANG*2], &hv_p1[NANG*2],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdAdd(NANG, &hv_p1[NANG*0], &hv_p1[NANG*1], pc,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdAdd(NANG, &hv_p1[NANG*2], pc, pc,
VML_ACCURACY | VML_HANDLING | VML_ERROR );
if (VDELT_CONST != 0 )
{
vmdMul(NANG, &PTR_IN_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)),
&hv_p1[NANG*3], &hv_p1[NANG*3],
VML_ACCURACY | VML_HANDLING | VML_ERROR );
cblas_daxpy(NANG, VDELT_CONST, &hv_p1[NANG*3], 1,
pc, 1);
}
cblas_dscal(NANG, 0.5, pc, 1);
cblas_daxpy(NANG, 1, psi, 1, pc, 1);
for ( ang = 0; ang < NANG; ang++ )
{
DEN_1D(ang) = T_XS_3D((i-1),(j-1),(k-1))
+ MU_1D(ang) * HI * HV_2D(ang,0)
+ HJ_1D(ang) * HV_2D(ang,1)
+ HK_1D(ang) * HV_2D(ang,2)
+ VDELT_CONST * HV_2D(ang,3);
if ( DEN_1D(ang) > TOLR )
{
PC_1D(ang) /= DEN_1D(ang);
}
else
{
PC_1D(ang) = 0;
}
}
}
else
{
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PSII_3D(ang,(j-1),(k-1))
* MU_1D(ang) * HI * (1+HV_2D(ang,0))
+ PSIJ_3D(ang,(ic-1),(k-1))
* HJ_1D(ang) * (1+HV_2D(ang,1))
+ PSIK_3D(ang,(ic-1),(j-1))
* HK_1D(ang) * (1+HV_2D(ang,2));
if ( VDELT_CONST != 0 )
{
PC_1D(ang) += VDELT_CONST
* PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
* (1+HV_2D(ang,3));
}
PC_1D(ang) = PSI_1D(ang) + 0.5*PC_1D(ang);
DEN_1D(ang) = T_XS_3D((i-1),(j-1),(k-1))
+ MU_1D(ang) * HI * HV_2D(ang,0)
+ HJ_1D(ang) * HV_2D(ang,1)
+ HK_1D(ang) * HV_2D(ang,2)
+ VDELT_CONST * HV_2D(ang,3);
if ( DEN_1D(ang) > TOLR )
{
PC_1D(ang) /= DEN_1D(ang);
}
else
{
PC_1D(ang) = 0;
}
}
}
#elif defined MKLUPDATE
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PSII_3D(ang,(j-1),(k-1))
* MU_1D(ang) * HI * (1+HV_2D(ang,0))
+ PSIJ_3D(ang,(ic-1),(k-1))
* HJ_1D(ang) * (1+HV_2D(ang,1))
+ PSIK_3D(ang,(ic-1),(j-1))
* HK_1D(ang) * (1+HV_2D(ang,2));
if ( VDELT_CONST != 0 )
{
PC_1D(ang) += VDELT_CONST
* PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
* (1+HV_2D(ang,3));
}
DEN_1D(ang) = T_XS_3D((i-1),(j-1),(k-1))
+ MU_1D(ang) * HI * HV_2D(ang,0)
+ HJ_1D(ang) * HV_2D(ang,1)
+ HK_1D(ang) * HV_2D(ang,2)
+ VDELT_CONST * HV_2D(ang,3);
}
cblas_dscal(NANG, 0.5, pc, 1);
cblas_daxpy(NANG, 1, psi, 1, pc, 1);
for ( ang = 0; ang < NANG; ang++ )
{
if ( DEN_1D(ang) > TOLR )
{
PC_1D(ang) /= DEN_1D(ang);
}
else
{
PC_1D(ang) = 0;
}
}
#else
for ( ang = 0; ang < NANG; ang++ )
{
PC_1D(ang) = PSII_3D(ang,(j-1),(k-1))
* MU_1D(ang) * HI * (1+HV_2D(ang,0))
+ PSIJ_3D(ang,(ic-1),(k-1))
* HJ_1D(ang) * (1+HV_2D(ang,1))
+ PSIK_3D(ang,(ic-1),(j-1))
* HK_1D(ang) * (1+HV_2D(ang,2));
if ( VDELT_CONST != 0 )
{
PC_1D(ang) += VDELT_CONST
* PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
* (1+HV_2D(ang,3));
}
PC_1D(ang) = PSI_1D(ang) + 0.5*PC_1D(ang);
DEN_1D(ang) = T_XS_3D((i-1),(j-1),(k-1))
+ MU_1D(ang) * HI * HV_2D(ang,0)
+ HJ_1D(ang) * HV_2D(ang,1)
+ HK_1D(ang) * HV_2D(ang,2)
+ VDELT_CONST * HV_2D(ang,3);
if ( DEN_1D(ang) > TOLR )
{
PC_1D(ang) /= DEN_1D(ang);
}
else
{
PC_1D(ang) = 0;
}
}
#endif
} // end fixup_loop
/***********************************************************************
* Fixup done, compute edges
***********************************************************************/
#ifdef USEVML
if (NANG > VECLEN_MIN)
{
cblas_dcopy(NANG, pc, 1, psi, 1);
vmdMul(NANG, &FXHV_2D(0,0), &HV_2D(0,0),
&PSII_3D(0,(j-1),(k-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
vmdMul(NANG, &FXHV_2D(0,1), &HV_2D(0,1),
&PSIJ_3D(0,(ic-1),(k-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
if ( NDIMEN == 3 )
{
vmdMul(NANG, &FXHV_2D(0,2), &HV_2D(0,2),
&PSIK_3D(0,(ic-1),(j-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
}
if ( VDELT_CONST != 0 )
{
vmdMul(NANG, &FXHV_2D(0,3), &HV_2D(0,3),
&PTR_OUT_6D(0,(i-1),(j-1),(k-1),(i1-1),(i2-1)),
VML_ACCURACY | VML_HANDLING | VML_ERROR );
}
}
else
{
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang) = PC_1D(ang);
PSII_3D(ang,(j-1),(k-1))
= FXHV_2D(ang,0) * HV_2D(ang,0);
PSIJ_3D(ang,(ic-1),(k-1))
= FXHV_2D(ang,1) * HV_2D(ang,1);
if ( NDIMEN == 3 )
{
PSIK_3D(ang,(ic-1),(j-1))
= FXHV_2D(ang,2) * HV_2D(ang,2);
}
if ( VDELT_CONST != 0 )
{
PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
= FXHV_2D(ang,3) * HV_2D(ang,3);
}
}
}
#else
for ( ang = 0; ang < NANG; ang++ )
{
PSI_1D(ang) = PC_1D(ang);
PSII_3D(ang,(j-1),(k-1))
= FXHV_2D(ang,0) * HV_2D(ang,0);
PSIJ_3D(ang,(ic-1),(k-1))
= FXHV_2D(ang,1) * HV_2D(ang,1);
if ( NDIMEN == 3 )
{
PSIK_3D(ang,(ic-1),(j-1))
= FXHV_2D(ang,2) * HV_2D(ang,2);
}
if ( VDELT_CONST != 0 )
{
PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1))
= FXHV_2D(ang,3) * HV_2D(ang,3);
}
}
#endif
}
/***********************************************************************
* Clear the flux arrays
***********************************************************************/
if ( oct == 1 )
{
FLUX_4D((i-1),(j-1),(k-1),(g-1)) = 0;
for ( indx1 = 0; indx1 < (CMOM-1); indx1++ )
{
FLUXM_5D(indx1,(i-1),(j-1),(k-1),(g-1)) = 0;
}
}
/***********************************************************************
* Compute the flux moments
***********************************************************************/
#ifdef USEMKL
vmdMul(NANG, W, psi, w_psi,
VML_ACCURACY | VML_HANDLING | VML_ERROR);
FLUX_4D((i-1),(j-1),(k-1),(g-1)) += cblas_ddot(NANG, W, 1, psi, 1);
for ( l = 1; l <= (CMOM-1); l++ )
{
FLUXM_5D((l-1),(i-1),(j-1),(k-1),(g-1)) += cblas_ddot(NANG, &EC_2D(0,l), 1, w_psi,1);
}
#else
sum_wpsi = 0;
for ( ang = 0; ang < NANG; ang++ )
{
sum_wpsi += W_1D(ang)*PSI_1D(ang);
}
FLUX_4D((i-1),(j-1),(k-1),(g-1)) += sum_wpsi;
for ( l = 1; l <= (CMOM-1); l++ )
{
sum_ecwpsi = 0;
for ( ang = 0; ang < NANG; ang++ )
{
sum_ecwpsi += EC_2D(ang,(l))*W_1D(ang)*PSI_1D(ang);
}
FLUXM_5D((l-1),(i-1),(j-1),(k-1),(g-1)) += sum_ecwpsi;
}
#endif
/***********************************************************************
* Calculate min and max scalar fluxes (not used elsewhere
* currently)
***********************************************************************/
if ( oct == NOCT )
{
FMIN = MIN( FMIN, FLUX_3D((i-1),(j-1),(k-1)) );
FMAX = MAX( FMAX, FLUX_3D((i-1),(j-1),(k-1)) );
}
/***********************************************************************
* Save edge fluxes (dummy if checks for unused non-vacuum BCs)
***********************************************************************/
if ( j == jhi )
{
if ( jd==2 && LASTY )
{
// CONTINUE
}
else if ( jd == 1 && FIRSTY )
{
if ( ibb == 1 )
{
// CONTINUE
}
}
else
{
#ifdef USEMKL
cblas_dcopy(NANG, &PSIJ_3D(0,(ic-1),(k-1)), 1,
&JB_OUT_3D(0,(ic-1),(k-1)), 1);
#else
for ( ang = 0; ang < NANG; ang++ )
{
JB_OUT_3D(ang,(ic-1),(k-1))
= PSIJ_3D(ang,(ic-1),(k-1));
}
#endif
}
}
if ( k == khi )
{
if ( kd == 2 && LASTZ )
{
// CONTINUE
}
else if ( kd==1 && FIRSTZ )
{
if ( ibf == 1 )
{
// CONTINUE
}
}
else
{
#ifdef USEMKL
cblas_dcopy(NANG, &PSIK_3D(0,(ic-1),(j-1)), 1,
&KB_OUT_3D(0,(ic-1),(j-1)), 1);
#else
for ( ang = 0; ang < NANG; ang++ )
{
KB_OUT_3D(ang,(ic-1),(j-1))
= PSIK_3D(ang,(ic-1),(j-1));
}
#endif
}
}
/***********************************************************************
* Compute leakages (not used elsewhere currently)
***********************************************************************/
if ( ((i+id-1) == 1) || ((i+id-1) == (NX+1)) )
{
#ifdef USEMKL
FLKX_3D((i+id-1-1),(j-1),(k-1))
+= ist*cblas_ddot(NANG, &WMU_1D(0), 1,
&PSII_3D(0,(j-1),(k-1)), 1);
#else
sum_wmupsii = 0;
for ( ang = 0; ang < NANG; ang++ )
{
sum_wmupsii
+= WMU_1D(ang) * PSII_3D(ang,(j-1),(k-1));
}
FLKX_3D((i+id-1-1),(j-1),(k-1))
+= ist*sum_wmupsii;
#endif
}
if ( (jd==1 && FIRSTY) || (jd==2 && LASTY) )
{
#ifdef USEMKL
FLKY_3D((i-1),(j+jd-1-1),(k-1))
+= jst*cblas_ddot(NANG, &WETA_1D(0), 1,
&PSIJ_3D(0,(ic-1),(k-1)), 1);
#else
sum_wetapsij = 0;
for ( ang = 0; ang < NANG; ang++ )
{
sum_wetapsij
+= WETA_1D(ang) * PSIJ_3D(ang,(ic-1),(k-1));
}
FLKY_3D((i-1),(j+jd-1-1),(k-1))
+= jst*sum_wetapsij;
#endif
}
if ( ((kd == 1 && FIRSTZ) || (kd == 2 && LASTZ)) && NDIMEN == 3 )
{
#ifdef USEMKL
FLKZ_3D((i-1),(j-1),(k+kd-1-1))
+= kst*cblas_ddot(NANG, &WXI_1D(0), 1,
&PSIK_3D(0,(ic-1),(j-1)), 1);
#else
sum_wxipsik = 0;
for ( ang = 0; ang < NANG; ang++ )
{
sum_wxipsik
+= WXI_1D(ang) * PSIK_3D(ang,(ic-1),(j-1));
}
FLKZ_3D((i-1),(j-1),(k+kd-1-1))
+= kst*sum_wxipsik;
#endif
}
}
/***********************************************************************
* Finish the loops
***********************************************************************/
} // end line_loop
} // end diagonal_loop
// } // omp end parallel
}
|
sp_single.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - SP
This benchmark is an OpenMP C version of the NPB SP code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: R. Van der Wijngaart
W. Saphir
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
//#include "npb-C.h"
/*
NAS Parallel Benchmarks 2.3 OpenMP C Versions
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/* global variables */
//#include "header.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
#if CLASS == 'S'
/* CLASS = S */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define PROBLEM_SIZE 12
#define NITER_DEFAULT 100
#define DT_DEFAULT 0.015
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'W'
/* CLASS = W */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define PROBLEM_SIZE 36
#define NITER_DEFAULT 400
#define DT_DEFAULT 0.0015
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'A'
/* CLASS = A */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define PROBLEM_SIZE 64
#define NITER_DEFAULT 400
#define DT_DEFAULT 0.0015
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'B'
/* CLASS = B */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define PROBLEM_SIZE 102
#define NITER_DEFAULT 400
#define DT_DEFAULT 0.001
#define CONVERTDOUBLE FALSE
#endif
#if CLASS == 'C'
/* CLASS = C */
/*
c This file is generated automatically by the setparams utility.
c It sets the number of processors and the class of the NPB
c in this directory. Do not modify it by hand.
*/
#define PROBLEM_SIZE 162
#define NITER_DEFAULT 400
#define DT_DEFAULT 0.00067
#define CONVERTDOUBLE FALSE
#endif
#define COMPILETIME "28 Oct 2014"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O2"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
/* common /global */
static int grid_points[3];
/* common /constants/ */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
ce[13][5], dxmax, dymax, dzmax, xxcon1, xxcon2,
xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1,
dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2,
c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt,
dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*--------------------------------------------------------------------
c To improve cache performance, first two dimensions padded by 1
c for even number sizes only
c-------------------------------------------------------------------*/
/* common /fields/ */
static double u [5][IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
us [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
vs [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
ws [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
qs [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
ainv [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
rho_i [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
speed [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
square [IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
rhs [5][IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
forcing [5][IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1],
lhs [15][IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
/* common /work_1d/ */
static double cv[PROBLEM_SIZE], rhon[PROBLEM_SIZE],
rhos[PROBLEM_SIZE], rhoq[PROBLEM_SIZE],
cuf[PROBLEM_SIZE], q[PROBLEM_SIZE],
ue[5][PROBLEM_SIZE], buf[5][PROBLEM_SIZE];
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void ninvr(void);
static void pinvr(void);
static void compute_rhs(void);
static void set_constants(void);
static void txinvr(void);
static void tzetar(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void y_solve(void);
static void z_solve(void);
/*--------------------------------------------------------------------
program SP
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Driver: read run parameters (from inputsp.data if present, else
c compiled defaults), initialize the problem, run one untimed warm-up
c ADI step, then time `niter` ADI iterations and verify the result.
c
c Fixes vs. original:
c  - every fscanf result is checked so a malformed input file is
c    reported instead of leaving niter/dt/grid_points uninitialized;
c  - the newline-skipping loops stop at EOF (the originals spin
c    forever if the file ends without a newline);
c  - the "compiled defaults" message gets its missing newline;
c  - explicit return 0.
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {
    int niter, step;
    double mflops, tmax;
    int nthreads = 1;
    boolean verified;
    char cclass;
    FILE *fp;

/*--------------------------------------------------------------------
c Read input file (if it exists), else take
c defaults from parameters
c-------------------------------------------------------------------*/
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - SP Benchmark\n\n");
    fp = fopen("inputsp.data", "r");
    if (fp != NULL) {
        int ok, ch;
        printf(" Reading from input file inputsp.data\n");
        ok = (fscanf(fp, "%d", &niter) == 1);
        while ((ch = fgetc(fp)) != '\n' && ch != EOF);
        ok = ok && (fscanf(fp, "%lf", &dt) == 1);
        while ((ch = fgetc(fp)) != '\n' && ch != EOF);
        ok = ok && (fscanf(fp, "%d%d%d",
                   &grid_points[0], &grid_points[1], &grid_points[2]) == 3);
        fclose(fp);
        if (!ok) {
            printf(" Error reading inputsp.data\n");
            exit(1);
        }
    } else {
        printf(" No input file inputsp.data. Using compiled defaults\n");
        niter = NITER_DEFAULT;
        dt = DT_DEFAULT;
        grid_points[0] = PROBLEM_SIZE;
        grid_points[1] = PROBLEM_SIZE;
        grid_points[2] = PROBLEM_SIZE;
    }
    printf(" Size: %3dx%3dx%3d\n",
           grid_points[0], grid_points[1], grid_points[2]);
    printf(" Iterations: %3d dt: %10.6f\n", niter, dt);
    if ( (grid_points[0] > IMAX) ||
         (grid_points[1] > JMAX) ||
         (grid_points[2] > KMAX) ) {
        printf("%d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
        printf(" Problem size too big for compiled array sizes\n");
        exit(1);
    }

    set_constants();
    initialize();
    lhsinit();
    exact_rhs();
/*--------------------------------------------------------------------
c do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
#pragma omp parallel
    {
        adi();
    }
    initialize();

    timer_clear(1);
    timer_start(1);
#pragma omp parallel private(step)
    {
        for (step = 1; step <= niter; step++) {
            if (step % 20 == 0 || step == 1) {
#pragma omp master
                printf(" Time step %4d\n", step);
            }
            adi();
        }
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end parallel */
    timer_stop(1);
    tmax = timer_read(1);

    verify(niter, &cclass, &verified);

    if (tmax != 0) {
        /* operation-count model for SP: cubic polynomial in PROBLEM_SIZE */
        mflops = ( 881.174 * pow((double)PROBLEM_SIZE, 3.0)
                   - 4683.91 * pow2((double)PROBLEM_SIZE)
                   + 11484.5 * (double)PROBLEM_SIZE
                   - 19272.4) * (double)niter / (tmax*1000000.0);
    } else {
        mflops = 0.0;
    }
    c_print_results("SP", cclass, grid_points[0],
                    grid_points[1], grid_points[2], niter, nthreads,
                    tmax, mflops, " floating point",
                    verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                    CS6, "(none)");
    return 0;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void add(void) {
/*--------------------------------------------------------------------
c u <- u + rhs at every interior grid point; the boundary planes are
c left untouched.  The omp for on the outermost (component) loop
c shares the work among the threads of the enclosing parallel region.
c-------------------------------------------------------------------*/
    int m, i, j, k;
    const int ilim = grid_points[0] - 2;
    const int jlim = grid_points[1] - 2;
    const int klim = grid_points[2] - 2;

#pragma omp for
    for (m = 0; m < 5; m++) {
        for (i = 1; i <= ilim; i++) {
            for (j = 1; j <= jlim; j++) {
                for (k = 1; k <= klim; k++) {
                    u[m][i][j][k] += rhs[m][i][j][k];
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
/*--------------------------------------------------------------------
c One full time step of the SP approximate-factorization (ADI)
c scheme.  The stage order is significant and must not be changed:
c   compute_rhs - assemble rhs[] (defined later in this file)
c   txinvr      - block-diagonal transform of the rhs
c   x/y/z_solve - sweep the three factored operators in turn
c   add         - accumulate the resulting update into u[]
c NOTE(review): the solver bodies are outside this excerpt; the stage
c descriptions above follow the standard NPB-SP algorithm - confirm
c against the routine definitions.
c-------------------------------------------------------------------*/
compute_rhs();
txinvr();
x_solve();
y_solve();
z_solve();
add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {
/*--------------------------------------------------------------------
c Root-mean-square norm, per solution component, of the difference
c between the computed field u and the analytic solution from
c exact_solution().  The sum of squares over the whole grid is scaled
c by the interior extent in each of the three directions, then rooted.
c-------------------------------------------------------------------*/
    int i, j, k, m, d;
    double xi, eta, zeta, sol[5], diff;

    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    for (i = 0; i < grid_points[0]; i++) {
        xi = (double)i * dnxm1;
        for (j = 0; j < grid_points[1]; j++) {
            eta = (double)j * dnym1;
            for (k = 0; k < grid_points[2]; k++) {
                zeta = (double)k * dnzm1;
                exact_solution(xi, eta, zeta, sol);
                for (m = 0; m < 5; m++) {
                    diff = u[m][i][j][k] - sol[m];
                    rms[m] += diff * diff;
                }
            }
        }
    }

    for (m = 0; m < 5; m++) {
        for (d = 0; d < 3; d++) {
            rms[m] /= (double)(grid_points[d]-2);
        }
        rms[m] = sqrt(rms[m]);
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {
/*--------------------------------------------------------------------
c Root-mean-square norm of the right-hand-side array, one value per
c component, scaled by the interior extent in each direction.
c-------------------------------------------------------------------*/
    int i, j, k, d, m;
    double term;

    for (m = 0; m < 5; m++) {
        rms[m] = 0.0;
    }

    for (i = 0; i < grid_points[0]-1; i++) {
        for (j = 0; j < grid_points[1]-1; j++) {
            for (k = 0; k < grid_points[2]-1; k++) {
                for (m = 0; m < 5; m++) {
                    term = rhs[m][i][j][k];
                    rms[m] += term * term;
                }
            }
        }
    }

    for (m = 0; m < 5; m++) {
        for (d = 0; d < 3; d++) {
            rms[m] /= (double)(grid_points[d]-2);
        }
        rms[m] = sqrt(rms[m]);
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c
c For each coordinate direction, flux differences plus a fourth-order
c artificial-dissipation term are accumulated into forcing[]; the
c sign of the whole interior is flipped at the end.  Work proceeds one
c grid line ("pencil") at a time using the 1-D scratch arrays:
c   ue[m][*]  - exact solution sampled along the line
c   buf[m][*] - ue[m]/ue[0] for m=1..4 (NOTE(review): ue[0] is
c               presumably density, making these velocities/energy per
c               unit mass - confirm);  buf[0] = sum of the squares of
c               buf[1..3]
c   cuf[*]    - square of the velocity component normal to the sweep
c   q[*]      - 0.5*(buf[1]*ue[1]+buf[2]*ue[2]+buf[3]*ue[3])
c-------------------------------------------------------------------*/
double dtemp[5], xi, eta, zeta, dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
/*--------------------------------------------------------------------
c initialize
c (zero the whole forcing array, including boundary points)
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k= 0; k <= grid_points[2]-1; k++) {
forcing[m][i][j][k] = 0.0;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
c (first fill the whole i-pencil, including the two boundary points,
c then difference only over interior i)
c-------------------------------------------------------------------*/
for (k = 1; k <= grid_points[2]-2; k++) {
zeta = (double)k * dnzm1;
for (j = 1; j <= grid_points[1]-2; j++) {
eta = (double)j * dnym1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[m][i] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m < 5; m++) {
buf[m][i] = dtpp * dtemp[m];
}
cuf[i] = buf[1][i] * buf[1][i];
buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] + buf[3][i] * buf[3][i];
q[i] = 0.5 * (buf[1][i]*ue[1][i] + buf[2][i]*ue[2][i]
+ buf[3][i]*ue[3][i]);
}
/* central flux differences (tx2 terms) plus second-difference
   viscous/diffusive terms (xxcon*, dx*tx1 coefficients) */
for (i = 1; i <= grid_points[0]-2; i++) {
im1 = i-1;
ip1 = i+1;
forcing[0][i][j][k] = forcing[0][i][j][k] -
tx2*( ue[1][ip1]-ue[1][im1] )+
dx1tx1*(ue[0][ip1]-2.0*ue[0][i]+ue[0][im1]);
forcing[1][i][j][k] = forcing[1][i][j][k]
- tx2 * ((ue[1][ip1]*buf[1][ip1]+c2*(ue[4][ip1]-q[ip1]))-
(ue[1][im1]*buf[1][im1]+c2*(ue[4][im1]-q[im1])))+
xxcon1*(buf[1][ip1]-2.0*buf[1][i]+buf[1][im1])+
dx2tx1*( ue[1][ip1]-2.0* ue[1][i]+ue[1][im1]);
forcing[2][i][j][k] = forcing[2][i][j][k]
- tx2 * (ue[2][ip1]*buf[1][ip1]-ue[2][im1]*buf[1][im1])+
xxcon2*(buf[2][ip1]-2.0*buf[2][i]+buf[2][im1])+
dx3tx1*( ue[2][ip1]-2.0*ue[2][i] +ue[2][im1]);
forcing[3][i][j][k] = forcing[3][i][j][k]
- tx2*(ue[3][ip1]*buf[1][ip1]-ue[3][im1]*buf[1][im1])+
xxcon2*(buf[3][ip1]-2.0*buf[3][i]+buf[3][im1])+
dx4tx1*( ue[3][ip1]-2.0* ue[3][i]+ ue[3][im1]);
forcing[4][i][j][k] = forcing[4][i][j][k]
- tx2*(buf[1][ip1]*(c1*ue[4][ip1]-c2*q[ip1])-
buf[1][im1]*(c1*ue[4][im1]-c2*q[im1]))+
0.5*xxcon3*(buf[0][ip1]-2.0*buf[0][i]+
buf[0][im1])+
xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
xxcon5*(buf[4][ip1]-2.0*buf[4][i]+buf[4][im1])+
dx5tx1*( ue[4][ip1]-2.0* ue[4][i]+ ue[4][im1]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c (one-sided stencils at the two points nearest each boundary,
c full 5-point stencil in between)
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
i = 1;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(5.0*ue[m][i] - 4.0*ue[m][i+1] +ue[m][i+2]);
i = 2;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(-4.0*ue[m][i-1] + 6.0*ue[m][i] -
4.0*ue[m][i+1] + ue[m][i+2]);
}
for (m = 0; m < 5; m++) {
for (i = 3; i <= grid_points[0]-4; i++) {
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
(ue[m][i-2] - 4.0*ue[m][i-1] +
6.0*ue[m][i] - 4.0*ue[m][i+1] + ue[m][i+2]);
}
}
for (m = 0; m < 5; m++) {
i = grid_points[0]-3;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(ue[m][i-2] - 4.0*ue[m][i-1] +
6.0*ue[m][i] - 4.0*ue[m][i+1]);
i = grid_points[0]-2;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(ue[m][i-2] - 4.0*ue[m][i-1] + 5.0*ue[m][i]);
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
c (same structure as the xi sweep, with the pencil running over j and
c buf[2] playing the role of the normal velocity)
c-------------------------------------------------------------------*/
for (k = 1; k <= grid_points[2]-2; k++) {
zeta = (double)k * dnzm1;
for (i = 1; i <= grid_points[0]-2; i++) {
xi = (double)i * dnxm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[m][j] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m < 5; m++) {
buf[m][j] = dtpp * dtemp[m];
}
cuf[j] = buf[2][j] * buf[2][j];
buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] +
buf[3][j] * buf[3][j];
q[j] = 0.5*(buf[1][j]*ue[1][j] + buf[2][j]*ue[2][j] +
buf[3][j]*ue[3][j]);
}
for (j = 1; j <= grid_points[1]-2; j++) {
jm1 = j-1;
jp1 = j+1;
forcing[0][i][j][k] = forcing[0][i][j][k] -
ty2*( ue[2][jp1]-ue[2][jm1] )+
dy1ty1*(ue[0][jp1]-2.0*ue[0][j]+ue[0][jm1]);
forcing[1][i][j][k] = forcing[1][i][j][k]
- ty2*(ue[1][jp1]*buf[2][jp1]-ue[1][jm1]*buf[2][jm1])+
yycon2*(buf[1][jp1]-2.0*buf[1][j]+buf[1][jm1])+
dy2ty1*( ue[1][jp1]-2.0* ue[1][j]+ ue[1][jm1]);
forcing[2][i][j][k] = forcing[2][i][j][k]
- ty2*((ue[2][jp1]*buf[2][jp1]+c2*(ue[4][jp1]-q[jp1]))-
(ue[2][jm1]*buf[2][jm1]+c2*(ue[4][jm1]-q[jm1])))+
yycon1*(buf[2][jp1]-2.0*buf[2][j]+buf[2][jm1])+
dy3ty1*( ue[2][jp1]-2.0*ue[2][j] +ue[2][jm1]);
forcing[3][i][j][k] = forcing[3][i][j][k]
- ty2*(ue[3][jp1]*buf[2][jp1]-ue[3][jm1]*buf[2][jm1])+
yycon2*(buf[3][jp1]-2.0*buf[3][j]+buf[3][jm1])+
dy4ty1*( ue[3][jp1]-2.0*ue[3][j]+ ue[3][jm1]);
forcing[4][i][j][k] = forcing[4][i][j][k]
- ty2*(buf[2][jp1]*(c1*ue[4][jp1]-c2*q[jp1])-
buf[2][jm1]*(c1*ue[4][jm1]-c2*q[jm1]))+
0.5*yycon3*(buf[0][jp1]-2.0*buf[0][j]+
buf[0][jm1])+
yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
yycon5*(buf[4][jp1]-2.0*buf[4][j]+buf[4][jm1])+
dy5ty1*(ue[4][jp1]-2.0*ue[4][j]+ue[4][jm1]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
j = 1;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(5.0*ue[m][j] - 4.0*ue[m][j+1] +ue[m][j+2]);
j = 2;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(-4.0*ue[m][j-1] + 6.0*ue[m][j] -
4.0*ue[m][j+1] + ue[m][j+2]);
}
for (m = 0; m < 5; m++) {
for (j = 3; j <= grid_points[1]-4; j++) {
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
(ue[m][j-2] - 4.0*ue[m][j-1] +
6.0*ue[m][j] - 4.0*ue[m][j+1] + ue[m][j+2]);
}
}
for (m = 0; m < 5; m++) {
j = grid_points[1]-3;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(ue[m][j-2] - 4.0*ue[m][j-1] +
6.0*ue[m][j] - 4.0*ue[m][j+1]);
j = grid_points[1]-2;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(ue[m][j-2] - 4.0*ue[m][j-1] + 5.0*ue[m][j]);
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
c (pencil runs over k; buf[3] is the normal velocity)
c-------------------------------------------------------------------*/
for (j = 1; j <= grid_points[1]-2; j++) {
eta = (double)j * dnym1;
for (i = 1; i <= grid_points[0]-2; i++) {
xi = (double)i * dnxm1;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[m][k] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m < 5; m++) {
buf[m][k] = dtpp * dtemp[m];
}
cuf[k] = buf[3][k] * buf[3][k];
buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] +
buf[2][k] * buf[2][k];
q[k] = 0.5*(buf[1][k]*ue[1][k] + buf[2][k]*ue[2][k] +
buf[3][k]*ue[3][k]);
}
for (k = 1; k <= grid_points[2]-2; k++) {
km1 = k-1;
kp1 = k+1;
forcing[0][i][j][k] = forcing[0][i][j][k] -
tz2*( ue[3][kp1]-ue[3][km1] )+
dz1tz1*(ue[0][kp1]-2.0*ue[0][k]+ue[0][km1]);
forcing[1][i][j][k] = forcing[1][i][j][k]
- tz2 * (ue[1][kp1]*buf[3][kp1]-ue[1][km1]*buf[3][km1])+
zzcon2*(buf[1][kp1]-2.0*buf[1][k]+buf[1][km1])+
dz2tz1*( ue[1][kp1]-2.0* ue[1][k]+ ue[1][km1]);
forcing[2][i][j][k] = forcing[2][i][j][k]
- tz2 * (ue[2][kp1]*buf[3][kp1]-ue[2][km1]*buf[3][km1])+
zzcon2*(buf[2][kp1]-2.0*buf[2][k]+buf[2][km1])+
dz3tz1*(ue[2][kp1]-2.0*ue[2][k]+ue[2][km1]);
forcing[3][i][j][k] = forcing[3][i][j][k]
- tz2 * ((ue[3][kp1]*buf[3][kp1]+c2*(ue[4][kp1]-q[kp1]))-
(ue[3][km1]*buf[3][km1]+c2*(ue[4][km1]-q[km1])))+
zzcon1*(buf[3][kp1]-2.0*buf[3][k]+buf[3][km1])+
dz4tz1*( ue[3][kp1]-2.0*ue[3][k] +ue[3][km1]);
forcing[4][i][j][k] = forcing[4][i][j][k]
- tz2 * (buf[3][kp1]*(c1*ue[4][kp1]-c2*q[kp1])-
buf[3][km1]*(c1*ue[4][km1]-c2*q[km1]))+
0.5*zzcon3*(buf[0][kp1]-2.0*buf[0][k]
+buf[0][km1])+
zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
zzcon5*(buf[4][kp1]-2.0*buf[4][k]+buf[4][km1])+
dz5tz1*( ue[4][kp1]-2.0*ue[4][k]+ ue[4][km1]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
k = 1;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(5.0*ue[m][k] - 4.0*ue[m][k+1] +ue[m][k+2]);
k = 2;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(-4.0*ue[m][k-1] + 6.0*ue[m][k] -
4.0*ue[m][k+1] + ue[m][k+2]);
}
for (m = 0; m < 5; m++) {
for (k = 3; k <= grid_points[2]-4; k++) {
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp*
(ue[m][k-2] - 4.0*ue[m][k-1] +
6.0*ue[m][k] - 4.0*ue[m][k+1] + ue[m][k+2]);
}
}
for (m = 0; m < 5; m++) {
k = grid_points[2]-3;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(ue[m][k-2] - 4.0*ue[m][k-1] +
6.0*ue[m][k] - 4.0*ue[m][k+1]);
k = grid_points[2]-2;
forcing[m][i][j][k] = forcing[m][i][j][k] - dssp *
(ue[m][k-2] - 4.0*ue[m][k-1] + 5.0*ue[m][k]);
}
}
}
/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c (interior points only; the boundary forcing stays zero)
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k];
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function returns the exact solution at point xi, eta, zeta
c-------------------------------------------------------------------*/
int m;
for (m = 0; m < 5; m++) {
dtemp[m] = ce[0][m] +
xi*(ce[1][m] + xi*(ce[4][m] +
xi*(ce[7][m] + xi*ce[10][m]))) +
eta*(ce[2][m] + eta*(ce[5][m] +
eta*(ce[8][m] + eta*ce[11][m])))+
zeta*(ce[3][m] + zeta*(ce[6][m] +
zeta*(ce[9][m] +
zeta*ce[12][m])));
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c
c Order matters: (1) fill everything with a safe nonzero state,
c (2) overwrite with the tri-linear interpolation of the six face
c solutions, (3) overwrite the six faces themselves with the exact
c boundary values.
c-------------------------------------------------------------------*/
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c NOTE(review): all three bounds use IMAX (IMAX==JMAX==KMAX here),
c and the loop stops at IMAX-1 while the padded arrays extend to
c IMAX/2*2; the padding element is never read by the visible loops,
c but confirm if array extents ever change.
c-------------------------------------------------------------------*/
for (i = 0; i <= IMAX-1; i++) {
for (j = 0; j <= IMAX-1; j++) {
for (k = 0; k <= IMAX-1; k++) {
u[0][i][j][k] = 1.0;
u[1][i][j][k] = 0.0;
u[2][i][j][k] = 0.0;
u[3][i][j][k] = 0.0;
u[4][i][j][k] = 1.0;
}
}
}
/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c Pface[s][d][m] is the exact solution on the low (s=0) / high (s=1)
c face normal to direction d; the three directions are then blended
c with the standard transfinite (inclusion-exclusion) formula.
c-------------------------------------------------------------------*/
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (ix = 0; ix < 2; ix++) {
exact_solution((double)ix, eta, zeta,
&Pface[ix][0][0]);
}
for (iy = 0; iy < 2; iy++) {
exact_solution(xi, (double)iy , zeta,
&Pface[iy][1][0]);
}
for (iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double)iz,
&Pface[iz][2][0]);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] +
(1.0-xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] +
(1.0-eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] +
(1.0-zeta) * Pface[0][2][m];
u[m][i][j][k] = Pxi + Peta + Pzeta -
Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
Pxi*Peta*Pzeta;
}
}
}
}
/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c (these overwrite the interpolated values set above)
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
xi = 0.0;
i = 0;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][i][j][k] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
xi = 1.0;
i = grid_points[0]-1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][i][j][k] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
eta = 0.0;
j = 0;
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][i][j][k] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
eta = 1.0;
j = grid_points[1]-1;
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][i][j][k] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
zeta = 0.0;
k = 0;
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i *dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][i][j][k] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
zeta = 1.0;
k = grid_points[2]-1;
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][i][j][k] = temp[m];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Initialize the left-hand-side array: clear all 15 bands, then put
c 1.0 on the main diagonal (band 2) of each of the three 5-band
c factors.  Runs inside an enclosing omp parallel region, so the
c "omp for" constructs below are orphaned worksharing regions.
c-------------------------------------------------------------------*/
  int ii, jj, kk, band;
/*--------------------------------------------------------------------
c zap the whole left hand side for starters
c-------------------------------------------------------------------*/
#pragma omp for nowait
  for (ii = 0; ii < grid_points[0]; ii++) {
    for (jj = 0; jj < grid_points[1]; jj++) {
      for (kk = 0; kk < grid_points[2]; kk++) {
        for (band = 0; band < 15; band++) {
          lhs[band][ii][jj][kk] = 0.0;
        }
      }
    }
  }
#pragma omp barrier
/*--------------------------------------------------------------------
c next, set all diagonal values to 1. This is overkill, but
c convenient
c-------------------------------------------------------------------*/
#pragma omp for
  for (ii = 0; ii < grid_points[0]; ii++) {
    for (jj = 0; jj < grid_points[1]; jj++) {
      for (kk = 0; kk < grid_points[2]; kk++) {
        /* band 5*n+2 is the main diagonal of factor n (n = 0,1,2) */
        for (band = 0; band < 3; band++) {
          lhs[5*band+2][ii][jj][kk] = 1.0;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three x-factors
c-------------------------------------------------------------------*/
/* Builds the three scalar pentadiagonal factors in the x direction:
   bands lhs[0..4] for the u eigenvalue, lhs[5..9] for (u+c), and
   lhs[10..14] for (u-c).  Called from inside an omp parallel region,
   so every "#pragma omp for" below is an orphaned worksharing loop. */
double ru1;
int i, j, k;
/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
/* cv: x-velocity along the i-pencil; rhon: max of the viscous
   coefficient candidates.  Both are shared scratch arrays; the
   implicit barrier at the end of this omp for separates their
   writes from the reads in the next loop. */
#pragma omp for
for (i = 0; i <= grid_points[0]-1; i++) {
ru1 = c3c4*rho_i[i][j][k];
cv[i] = us[i][j][k];
rhon[i] = max(dx2+con43*ru1,
max(dx5+c1c5*ru1,
max(dxmax+ru1,
dx1)));
}
/* tridiagonal core: lhs[1] = sub-, lhs[2] = main, lhs[3] = super-
   diagonal; lhs[0]/lhs[4] stay 0 until dissipation widens the
   stencil to five points below */
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
lhs[0][i][j][k] = 0.0;
lhs[1][i][j][k] = - dttx2 * cv[i-1] - dttx1 * rhon[i-1];
lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i];
lhs[3][i][j][k] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];
lhs[4][i][j][k] = 0.0;
}
}
}
/*--------------------------------------------------------------------
c add fourth order dissipation
c-------------------------------------------------------------------*/
/* one-sided dissipation stencils at the low-i boundary (i=1, i=2) */
i = 1;
#pragma omp for nowait
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz6;
lhs[3][i+1][j][k] = lhs[3][i+1][j][k] - comz4;
lhs[4][i+1][j][k] = lhs[4][i+1][j][k] + comz1;
}
}
/* full centered five-point dissipation stencil in the interior */
#pragma omp for nowait
for (i = 3; i <= grid_points[0]-4; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
}
}
}
/* one-sided dissipation stencils at the high-i boundary */
i = grid_points[0]-3;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[0][i+1][j][k] = lhs[0][i+1][j][k] + comz1;
lhs[1][i+1][j][k] = lhs[1][i+1][j][k] - comz4;
lhs[2][i+1][j][k] = lhs[2][i+1][j][k] + comz5;
}
}
/*--------------------------------------------------------------------
c subsequently, fill the other factors (u+c), (u-c) by adding to
c the first
c-------------------------------------------------------------------*/
/* the acoustic factors differ from the u factor only by the +-c
   contribution on the off-diagonals (dttx2 * local sound speed) */
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0+5][i][j][k] = lhs[0][i][j][k];
lhs[1+5][i][j][k] = lhs[1][i][j][k] -
dttx2 * speed[i-1][j][k];
lhs[2+5][i][j][k] = lhs[2][i][j][k];
lhs[3+5][i][j][k] = lhs[3][i][j][k] +
dttx2 * speed[i+1][j][k];
lhs[4+5][i][j][k] = lhs[4][i][j][k];
lhs[0+10][i][j][k] = lhs[0][i][j][k];
lhs[1+10][i][j][k] = lhs[1][i][j][k] +
dttx2 * speed[i-1][j][k];
lhs[2+10][i][j][k] = lhs[2][i][j][k];
lhs[3+10][i][j][k] = lhs[3][i][j][k] -
dttx2 * speed[i+1][j][k];
lhs[4+10][i][j][k] = lhs[4][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
/* Same structure as lhsx, but sweeping the j (eta) direction:
   lhs[0..4] for the v eigenvalue, lhs[5..9] for (v+c), lhs[10..14]
   for (v-c).  Orphaned omp worksharing loops throughout. */
double ru1;
int i, j, k;
/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
/* cv: y-velocity along the j-pencil; rhoq: max viscous coefficient.
   Shared scratch; the implicit barrier of this omp for orders the
   writes before the reads in the next loop. */
#pragma omp for
for (j = 0; j <= grid_points[1]-1; j++) {
ru1 = c3c4*rho_i[i][j][k];
cv[j] = vs[i][j][k];
rhoq[j] = max(dy3 + con43 * ru1,
max(dy5 + c1c5*ru1,
max(dymax + ru1,
dy1)));
}
/* tridiagonal core (sub/main/super on bands 1/2/3) */
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[0][i][j][k] = 0.0;
lhs[1][i][j][k] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j];
lhs[3][i][j][k] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
lhs[4][i][j][k] = 0.0;
}
}
}
/*--------------------------------------------------------------------
c add fourth order dissipation
c-------------------------------------------------------------------*/
/* one-sided dissipation stencils at the low-j boundary (j=1, j=2) */
j = 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz6;
lhs[3][i][j+1][k] = lhs[3][i][j+1][k] - comz4;
lhs[4][i][j+1][k] = lhs[4][i][j+1][k] + comz1;
}
}
/* full centered five-point dissipation stencil in the interior */
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 3; j <= grid_points[1]-4; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
}
}
}
/* one-sided dissipation stencils at the high-j boundary */
j = grid_points[1]-3;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[0][i][j+1][k] = lhs[0][i][j+1][k] + comz1;
lhs[1][i][j+1][k] = lhs[1][i][j+1][k] - comz4;
lhs[2][i][j+1][k] = lhs[2][i][j+1][k] + comz5;
}
}
/*--------------------------------------------------------------------
c subsequently, do the other two factors
c-------------------------------------------------------------------*/
/* acoustic factors: add/subtract dtty2 * local sound speed on the
   off-diagonals of the v factor */
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0+5][i][j][k] = lhs[0][i][j][k];
lhs[1+5][i][j][k] = lhs[1][i][j][k] -
dtty2 * speed[i][j-1][k];
lhs[2+5][i][j][k] = lhs[2][i][j][k];
lhs[3+5][i][j][k] = lhs[3][i][j][k] +
dtty2 * speed[i][j+1][k];
lhs[4+5][i][j][k] = lhs[4][i][j][k];
lhs[0+10][i][j][k] = lhs[0][i][j][k];
lhs[1+10][i][j][k] = lhs[1][i][j][k] +
dtty2 * speed[i][j-1][k];
lhs[2+10][i][j][k] = lhs[2][i][j][k];
lhs[3+10][i][j][k] = lhs[3][i][j][k] -
dtty2 * speed[i][j+1][k];
lhs[4+10][i][j][k] = lhs[4][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/
/* Same structure as lhsx/lhsy, sweeping the k (zeta) direction:
   lhs[0..4] for the w eigenvalue, lhs[5..9] for (w+c), lhs[10..14]
   for (w-c).  Orphaned omp worksharing loops throughout. */
double ru1;
int i, j, k;
/*--------------------------------------------------------------------
c first fill the lhs for the u-eigenvalue
c-------------------------------------------------------------------*/
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
/* cv: z-velocity along the k-pencil; rhos: max viscous coefficient.
   Shared scratch; implicit barrier of this omp for orders writes
   before the reads below. */
#pragma omp for
for (k = 0; k <= grid_points[2]-1; k++) {
ru1 = c3c4*rho_i[i][j][k];
cv[k] = ws[i][j][k];
rhos[k] = max(dz4 + con43 * ru1,
max(dz5 + c1c5 * ru1,
max(dzmax + ru1,
dz1)));
}
/* tridiagonal core (sub/main/super on bands 1/2/3) */
#pragma omp for
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0][i][j][k] = 0.0;
lhs[1][i][j][k] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1];
lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k];
lhs[3][i][j][k] = dttz2 * cv[k+1] - dttz1 * rhos[k+1];
lhs[4][i][j][k] = 0.0;
}
}
}
/*--------------------------------------------------------------------
c add fourth order dissipation
c-------------------------------------------------------------------*/
/* one-sided dissipation stencils at the low-k boundary (k=1, k=2) */
k = 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[2][i][j][k] = lhs[2][i][j][k] + comz5;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz6;
lhs[3][i][j][k+1] = lhs[3][i][j][k+1] - comz4;
lhs[4][i][j][k+1] = lhs[4][i][j][k+1] + comz1;
}
}
/* full centered five-point dissipation stencil in the interior */
#pragma omp for nowait
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 3; k <= grid_points[2]-4; k++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[4][i][j][k] = lhs[4][i][j][k] + comz1;
}
}
}
/* one-sided dissipation stencils at the high-k boundary */
k = grid_points[2]-3;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[0][i][j][k] = lhs[0][i][j][k] + comz1;
lhs[1][i][j][k] = lhs[1][i][j][k] - comz4;
lhs[2][i][j][k] = lhs[2][i][j][k] + comz6;
lhs[3][i][j][k] = lhs[3][i][j][k] - comz4;
lhs[0][i][j][k+1] = lhs[0][i][j][k+1] + comz1;
lhs[1][i][j][k+1] = lhs[1][i][j][k+1] - comz4;
lhs[2][i][j][k+1] = lhs[2][i][j][k+1] + comz5;
}
}
/*--------------------------------------------------------------------
c subsequently, fill the other factors (u+c), (u-c)
c-------------------------------------------------------------------*/
/* acoustic factors: add/subtract dttz2 * local sound speed on the
   off-diagonals of the w factor */
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
lhs[0+5][i][j][k] = lhs[0][i][j][k];
lhs[1+5][i][j][k] = lhs[1][i][j][k] -
dttz2 * speed[i][j][k-1];
lhs[2+5][i][j][k] = lhs[2][i][j][k];
lhs[3+5][i][j][k] = lhs[3][i][j][k] +
dttz2 * speed[i][j][k+1];
lhs[4+5][i][j][k] = lhs[4][i][j][k];
lhs[0+10][i][j][k] = lhs[0][i][j][k];
lhs[1+10][i][j][k] = lhs[1][i][j][k] +
dttz2 * speed[i][j][k-1];
lhs[2+10][i][j][k] = lhs[2][i][j][k];
lhs[3+10][i][j][k] = lhs[3][i][j][k] -
dttz2 * speed[i][j][k+1];
lhs[4+10][i][j][k] = lhs[4][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void ninvr(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: applies the inverse
c N transform to the five rhs components at every interior point
c-------------------------------------------------------------------*/
  int i, j, k;
  double v0, v1, v2, v3, v4, d1, d2;

#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* load the five rhs components at this grid point */
        v0 = rhs[0][i][j][k];
        v1 = rhs[1][i][j][k];
        v2 = rhs[2][i][j][k];
        v3 = rhs[3][i][j][k];
        v4 = rhs[4][i][j][k];
        /* common subexpressions of the transform */
        d1 = bt * v2;
        d2 = 0.5 * ( v3 + v4 );
        rhs[0][i][j][k] = -v1;
        rhs[1][i][j][k] = v0;
        rhs[2][i][j][k] = bt * ( v3 - v4 );
        rhs[3][i][j][k] = d2 - d1;
        rhs[4][i][j][k] = d2 + d1;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pinvr(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication: applies the inverse
c P transform to the five rhs components at every interior point
c-------------------------------------------------------------------*/
  int i, j, k;
  double q0, q1, q2, q3, q4, s1, s2;

#pragma omp for
  for (i = 1; i <= grid_points[0]-2; i++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (k = 1; k <= grid_points[2]-2; k++) {
        /* load the five rhs components at this grid point */
        q0 = rhs[0][i][j][k];
        q1 = rhs[1][i][j][k];
        q2 = rhs[2][i][j][k];
        q3 = rhs[3][i][j][k];
        q4 = rhs[4][i][j][k];
        /* common subexpressions of the transform */
        s1 = bt * q0;
        s2 = 0.5 * ( q3 + q4 );
        rhs[0][i][j][k] = bt * ( q3 - q4 );
        rhs[1][i][j][k] = -q2;
        rhs[2][i][j][k] = q1;
        rhs[3][i][j][k] = s2 - s1;
        rhs[4][i][j][k] = s2 + s1;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Evaluates the full right-hand side of the SP discretization:
   forcing term + xi/eta/zeta central flux differences + 4th-order
   artificial dissipation in each direction, then scales by dt.
   Also refreshes the auxiliary fields (rho_i, us, vs, ws, square,
   qs, speed, ainv) that the lhs routines consume. */
int i, j, k, m;
double aux, rho_inv, uijk, up1, um1, vijk, vp1, vm1,
wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k = 0; k <= grid_points[2]-1; k++) {
rho_inv = 1.0/u[0][i][j][k];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[1][i][j][k] * rho_inv;
vs[i][j][k] = u[2][i][j][k] * rho_inv;
ws[i][j][k] = u[3][i][j][k] * rho_inv;
square[i][j][k] = 0.5* (u[1][i][j][k]*u[1][i][j][k] +
u[2][i][j][k]*u[2][i][j][k] +
u[3][i][j][k]*u[3][i][j][k] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
/*--------------------------------------------------------------------
c (do not need speed and ainv until the lhs computation)
c-------------------------------------------------------------------*/
aux = c1c2*rho_inv* (u[4][i][j][k] - square[i][j][k]);
aux = sqrt(aux);
speed[i][j][k] = aux;
ainv[i][j][k] = 1.0/aux;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 0; i <= grid_points[0]-1; i++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (k = 0; k <= grid_points[2]-1; k++) {
rhs[m][i][j][k] = forcing[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 *
(u[0][i+1][j][k] - 2.0*u[0][i][j][k] +
u[0][i-1][j][k]) -
tx2 * (u[1][i+1][j][k] - u[1][i-1][j][k]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 *
(u[1][i+1][j][k] - 2.0*u[1][i][j][k] +
u[1][i-1][j][k]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[1][i+1][j][k]*up1 -
u[1][i-1][j][k]*um1 +
(u[4][i+1][j][k]- square[i+1][j][k]-
u[4][i-1][j][k]+ square[i-1][j][k])*
c2);
rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 *
(u[2][i+1][j][k] - 2.0*u[2][i][j][k] +
u[2][i-1][j][k]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[2][i+1][j][k]*up1 -
u[2][i-1][j][k]*um1);
rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 *
(u[3][i+1][j][k] - 2.0*u[3][i][j][k] +
u[3][i-1][j][k]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[3][i+1][j][k]*up1 -
u[3][i-1][j][k]*um1);
rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 *
(u[4][i+1][j][k] - 2.0*u[4][i][j][k] +
u[4][i-1][j][k]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[4][i+1][j][k]*rho_i[i+1][j][k] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i-1][j][k]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[4][i+1][j][k] -
c2*square[i+1][j][k])*up1 -
(c1*u[4][i-1][j][k] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
/* one-sided stencils near i = 0 boundary */
i = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
u[m][i+2][j][k]);
}
}
}
i = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i-1][j][k] + 6.0*u[m][i][j][k] -
4.0*u[m][i+1][j][k] + u[m][i+2][j][k]);
}
}
}
/* full centered five-point stencil in the interior */
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 3*1; i <= grid_points[0]-3*1-1; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] +
u[m][i+2][j][k] );
}
}
}
}
/* one-sided stencils near the high-i boundary */
i = grid_points[0]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i+1][j][k] );
}
}
}
i = grid_points[0]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i-2][j][k] - 4.0*u[m][i-1][j][k] +
5.0*u[m][i][j][k] );
}
}
}
#pragma omp barrier
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 *
(u[0][i][j+1][k] - 2.0*u[0][i][j][k] +
u[0][i][j-1][k]) -
ty2 * (u[2][i][j+1][k] - u[2][i][j-1][k]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 *
(u[1][i][j+1][k] - 2.0*u[1][i][j][k] +
u[1][i][j-1][k]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[1][i][j+1][k]*vp1 -
u[1][i][j-1][k]*vm1);
rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 *
(u[2][i][j+1][k] - 2.0*u[2][i][j][k] +
u[2][i][j-1][k]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[2][i][j+1][k]*vp1 -
u[2][i][j-1][k]*vm1 +
(u[4][i][j+1][k] - square[i][j+1][k] -
u[4][i][j-1][k] + square[i][j-1][k])
*c2);
rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 *
(u[3][i][j+1][k] - 2.0*u[3][i][j][k] +
u[3][i][j-1][k]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[3][i][j+1][k]*vp1 -
u[3][i][j-1][k]*vm1);
rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 *
(u[4][i][j+1][k] - 2.0*u[4][i][j][k] +
u[4][i][j-1][k]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[4][i][j+1][k]*rho_i[i][j+1][k] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i][j-1][k]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[4][i][j+1][k] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[4][i][j-1][k] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
/* same boundary/interior stencil split as the xi direction */
j = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
u[m][i][j+2][k]);
}
}
}
j = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i][j-1][k] + 6.0*u[m][i][j][k] -
4.0*u[m][i][j+1][k] + u[m][i][j+2][k]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 3*1; j <= grid_points[1]-3*1-1; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] +
u[m][i][j+2][k] );
}
}
}
}
j = grid_points[1]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j+1][k] );
}
}
}
j = grid_points[1]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j-2][k] - 4.0*u[m][i][j-1][k] +
5.0*u[m][i][j][k] );
}
}
}
#pragma omp barrier
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 *
(u[0][i][j][k+1] - 2.0*u[0][i][j][k] +
u[0][i][j][k-1]) -
tz2 * (u[3][i][j][k+1] - u[3][i][j][k-1]);
rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 *
(u[1][i][j][k+1] - 2.0*u[1][i][j][k] +
u[1][i][j][k-1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[1][i][j][k+1]*wp1 -
u[1][i][j][k-1]*wm1);
rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 *
(u[2][i][j][k+1] - 2.0*u[2][i][j][k] +
u[2][i][j][k-1]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[2][i][j][k+1]*wp1 -
u[2][i][j][k-1]*wm1);
rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 *
(u[3][i][j][k+1] - 2.0*u[3][i][j][k] +
u[3][i][j][k-1]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[3][i][j][k+1]*wp1 -
u[3][i][j][k-1]*wm1 +
(u[4][i][j][k+1] - square[i][j][k+1] -
u[4][i][j][k-1] + square[i][j][k-1])
*c2);
rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 *
(u[4][i][j][k+1] - 2.0*u[4][i][j][k] +
u[4][i][j][k-1]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[4][i][j][k+1]*rho_i[i][j][k+1] -
2.0*u[4][i][j][k]*rho_i[i][j][k] +
u[4][i][j][k-1]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[4][i][j][k+1] -
c2*square[i][j][k+1])*wp1 -
(c1*u[4][i][j][k-1] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
/* same boundary/interior stencil split as the other two directions */
k = 1;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k]- dssp *
( 5.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
u[m][i][j][k+2]);
}
}
}
k = 2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
(-4.0*u[m][i][j][k-1] + 6.0*u[m][i][j][k] -
4.0*u[m][i][j][k+1] + u[m][i][j][k+2]);
}
}
}
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 3*1; k <= grid_points[2]-3*1-1; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] +
u[m][i][j][k+2] );
}
}
}
}
k = grid_points[2]-3;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
6.0*u[m][i][j][k] - 4.0*u[m][i][j][k+1] );
}
}
}
k = grid_points[2]-2;
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - dssp *
( u[m][i][j][k-2] - 4.0*u[m][i][j][k-1] +
5.0*u[m][i][j][k] );
}
}
}
/* finally scale the whole rhs by the time step */
for (m = 0; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* Fills every problem constant used by the solver: the exact-solution
   coefficient table ce, the fluid constants c1..c5, grid spacings,
   diffusion coefficients, and all the derived products the kernels
   read.  Must run before any compute routine. */
/* ce[0..12][m]: coefficients of the exact solution polynomial for
   solution component m (m = 0..4) */
ce[0][0] = 2.0;
ce[1][0] = 0.0;
ce[2][0] = 0.0;
ce[3][0] = 4.0;
ce[4][0] = 5.0;
ce[5][0] = 3.0;
ce[6][0] = 0.5;
ce[7][0] = 0.02;
ce[8][0] = 0.01;
ce[9][0] = 0.03;
ce[10][0] = 0.5;
ce[11][0] = 0.4;
ce[12][0] = 0.3;
ce[0][1] = 1.0;
ce[1][1] = 0.0;
ce[2][1] = 0.0;
ce[3][1] = 0.0;
ce[4][1] = 1.0;
ce[5][1] = 2.0;
ce[6][1] = 3.0;
ce[7][1] = 0.01;
ce[8][1] = 0.03;
ce[9][1] = 0.02;
ce[10][1] = 0.4;
ce[11][1] = 0.3;
ce[12][1] = 0.5;
ce[0][2] = 2.0;
ce[1][2] = 2.0;
ce[2][2] = 0.0;
ce[3][2] = 0.0;
ce[4][2] = 0.0;
ce[5][2] = 2.0;
ce[6][2] = 3.0;
ce[7][2] = 0.04;
ce[8][2] = 0.03;
ce[9][2] = 0.05;
ce[10][2] = 0.3;
ce[11][2] = 0.5;
ce[12][2] = 0.4;
ce[0][3] = 2.0;
ce[1][3] = 2.0;
ce[2][3] = 0.0;
ce[3][3] = 0.0;
ce[4][3] = 0.0;
ce[5][3] = 2.0;
ce[6][3] = 3.0;
ce[7][3] = 0.03;
ce[8][3] = 0.05;
ce[9][3] = 0.04;
ce[10][3] = 0.2;
ce[11][3] = 0.1;
ce[12][3] = 0.3;
ce[0][4] = 5.0;
ce[1][4] = 4.0;
ce[2][4] = 3.0;
ce[3][4] = 2.0;
ce[4][4] = 0.1;
ce[5][4] = 0.4;
ce[6][4] = 0.3;
ce[7][4] = 0.05;
ce[8][4] = 0.04;
ce[9][4] = 0.03;
ce[10][4] = 0.1;
ce[11][4] = 0.3;
ce[12][4] = 0.2;
/* fluid constants (c1 = gamma, c2 = gamma-1, etc.) */
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
bt = sqrt(0.5);
/* mesh spacings: 1/(points-1) in each direction */
dnxm1 = 1.0 / (double)(grid_points[0]-1);
dnym1 = 1.0 / (double)(grid_points[1]-1);
dnzm1 = 1.0 / (double)(grid_points[2]-1);
/* derived products of the fluid constants */
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0-c1c5);
/* inverse metric terms: tx1 = 1/dx^2, tx2 = 1/(2 dx), tx3 = 1/dx */
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
/* artificial diffusion coefficients per direction and equation */
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = max(dx3, dx4);
dymax = max(dy2, dy4);
dzmax = max(dz2, dz3);
/* fourth-order dissipation strength and its common multiples */
dssp = 0.25 * max(dx1, max(dy1, dz1) );
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
/* time-step-scaled metric terms used by the lhs builders */
dttx1 = dt*tx1;
dttx2 = dt*tx2;
dtty1 = dt*ty1;
dtty2 = dt*ty2;
dttz1 = dt*tz1;
dttz2 = dt*tz2;
c2dttx1 = 2.0*dttx1;
c2dtty1 = 2.0*dtty1;
c2dttz1 = 2.0*dttz1;
/* dissipation stencil weights (1,4,5,6 pattern) */
dtdssp = dt*dssp;
comz1 = dtdssp;
comz4 = 4.0*dtdssp;
comz5 = 5.0*dtdssp;
comz6 = 6.0*dtdssp;
c3c4tx3 = c3c4*tx3;
c3c4ty3 = c3c4*ty3;
c3c4tz3 = c3c4*tz3;
dx1tx1 = dx1*tx1;
dx2tx1 = dx2*tx1;
dx3tx1 = dx3*tx1;
dx4tx1 = dx4*tx1;
dx5tx1 = dx5*tx1;
dy1ty1 = dy1*ty1;
dy2ty1 = dy2*ty1;
dy3ty1 = dy3*ty1;
dy4ty1 = dy4*ty1;
dy5ty1 = dy5*ty1;
dz1tz1 = dz1*tz1;
dz2tz1 = dz2*tz1;
dz3tz1 = dz3*tz1;
dz4tz1 = dz4*tz1;
dz5tz1 = dz5*tz1;
c2iv = 2.5;
con43 = 4.0/3.0;
con16 = 1.0/6.0;
/* viscous flux coefficients per direction (con43/conz1/con16/c1c5
   variants used by compute_rhs) */
xxcon1 = c3c4tx3*con43*tx3;
xxcon2 = c3c4tx3*tx3;
xxcon3 = c3c4tx3*conz1*tx3;
xxcon4 = c3c4tx3*con16*tx3;
xxcon5 = c3c4tx3*c1c5*tx3;
yycon1 = c3c4ty3*con43*ty3;
yycon2 = c3c4ty3*ty3;
yycon3 = c3c4ty3*conz1*ty3;
yycon4 = c3c4ty3*con16*ty3;
yycon5 = c3c4ty3*c1c5*ty3;
zzcon1 = c3c4tz3*con43*tz3;
zzcon2 = c3c4tz3*tz3;
zzcon3 = c3c4tz3*conz1*tz3;
zzcon4 = c3c4tz3*con16*tz3;
zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void txinvr(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication
--------------------------------------------------------------------*/
int i, j, k;
double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3,
r4, r5, ac2inv;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
ru1 = rho_i[i][j][k];
uu = us[i][j][k];
vv = vs[i][j][k];
ww = ws[i][j][k];
ac = speed[i][j][k];
ac2inv = ainv[i][j][k]*ainv[i][j][k];
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
t1 = c2 * ac2inv * ( qs[i][j][k]*r1 - uu*r2 -
vv*r3 - ww*r4 + r5 );
t2 = bt * ru1 * ( uu * r1 - r2 );
t3 = ( bt * ru1 * ac ) * t1;
rhs[0][i][j][k] = r1 - t1;
rhs[1][i][j][k] = - ru1 * ( ww*r1 - r4 );
rhs[2][i][j][k] = ru1 * ( vv*r1 - r3 );
rhs[3][i][j][k] = - t2 + t3;
rhs[4][i][j][k] = t2 + t3;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void tzetar(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c block-diagonal matrix-vector multiplication
c-------------------------------------------------------------------*/
int i, j, k;
double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3,
r4, r5, btuz, acinv, ac2u, uzik1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
xvel = us[i][j][k];
yvel = vs[i][j][k];
zvel = ws[i][j][k];
ac = speed[i][j][k];
acinv = ainv[i][j][k];
ac2u = ac*ac;
r1 = rhs[0][i][j][k];
r2 = rhs[1][i][j][k];
r3 = rhs[2][i][j][k];
r4 = rhs[3][i][j][k];
r5 = rhs[4][i][j][k];
uzik1 = u[0][i][j][k];
btuz = bt * uzik1;
t1 = btuz*acinv * (r4 + r5);
t2 = r3 + t1;
t3 = btuz * (r4 - r5);
rhs[0][i][j][k] = t2;
rhs[1][i][j][k] = -uzik1*r2 + xvel*t2;
rhs[2][i][j][k] = uzik1*r1 + yvel*t2;
rhs[3][i][j][k] = zvel*t2 + t3;
rhs[4][i][j][k] = uzik1*(-xvel*r2 + yvel*r1) +
qs[i][j][k]*t2 + c2iv*ac2u*t1 + zvel*t3;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verification routine
--------------------------------------------------------------------*/
/* Compares the computed error and residual norms against stored
   reference values for the recognized problem classes (S/W/A/B/C).
   On return, *cclass is the matched class letter ('U' if unknown)
   and *verified is TRUE only if every norm and DT match the
   references within epsilon. */
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/* dtref is assigned in every recognized-class branch below and is
   read only when *cclass != 'U', so it is never used uninitialized */
/*--------------------------------------------------------------------
c tolerance level
--------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
--------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/* residual norms are reported per unit time step */
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*cclass = 'U';
*verified = TRUE;
/* defaults of 1.0 keep the relative-difference computation below
   well-defined (no division by zero) even for an unknown class */
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 100 time steps, with DT = 1.50d-02
--------------------------------------------------------------------*/
if ( grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 100) {
*cclass = 'S';
dtref = 1.5e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 2.7470315451339479e-02;
xcrref[1] = 1.0360746705285417e-02;
xcrref[2] = 1.6235745065095532e-02;
xcrref[3] = 1.5840557224455615e-02;
xcrref[4] = 3.4849040609362460e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 2.7289258557377227e-05;
xceref[1] = 1.0364446640837285e-05;
xceref[2] = 1.6154798287166471e-05;
xceref[3] = 1.5750704994480102e-05;
xceref[4] = 3.4177666183390531e-05;
/*--------------------------------------------------------------------
c reference data for 36X36X36 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 36 &&
grid_points[1] == 36 &&
grid_points[2] == 36 &&
no_time_steps == 400) {
*cclass = 'W';
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 0.1893253733584e-02;
xcrref[1] = 0.1717075447775e-03;
xcrref[2] = 0.2778153350936e-03;
xcrref[3] = 0.2887475409984e-03;
xcrref[4] = 0.3143611161242e-02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 0.7542088599534e-04;
xceref[1] = 0.6512852253086e-05;
xceref[2] = 0.1049092285688e-04;
xceref[3] = 0.1128838671535e-04;
xceref[4] = 0.1212845639773e-03;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 400 time steps, with DT = 1.5d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 400 ) {
*cclass = 'A';
dtref = 1.5e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 2.4799822399300195;
xcrref[1] = 1.1276337964368832;
xcrref[2] = 1.5028977888770491;
xcrref[3] = 1.4217816211695179;
xcrref[4] = 2.1292113035138280;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 1.0900140297820550e-04;
xceref[1] = 3.7343951769282091e-05;
xceref[2] = 5.0092785406541633e-05;
xceref[3] = 4.7671093939528255e-05;
xceref[4] = 1.3621613399213001e-04;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 400 time steps,
c with DT = 1.0d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 400) {
*cclass = 'B';
dtref = 1.0e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 0.6903293579998e+02;
xcrref[1] = 0.3095134488084e+02;
xcrref[2] = 0.4103336647017e+02;
xcrref[3] = 0.3864769009604e+02;
xcrref[4] = 0.5643482272596e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 0.9810006190188e-02;
xceref[1] = 0.1022827905670e-02;
xceref[2] = 0.1720597911692e-02;
xceref[3] = 0.1694479428231e-02;
xceref[4] = 0.1847456263981e-01;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 400 time steps,
c with DT = 0.67d-03
--------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 400) {
*cclass = 'C';
dtref = 0.67e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
--------------------------------------------------------------------*/
xcrref[0] = 0.5881691581829e+03;
xcrref[1] = 0.2454417603569e+03;
xcrref[2] = 0.3293829191851e+03;
xcrref[3] = 0.3081924971891e+03;
xcrref[4] = 0.4597223799176e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
--------------------------------------------------------------------*/
xceref[0] = 0.2598120500183e+00;
xceref[1] = 0.2590888922315e-01;
xceref[2] = 0.5132886416320e-01;
xceref[3] = 0.4806073419454e-01;
xceref[4] = 0.5483377491301e+00;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
--------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]) ;
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
--------------------------------------------------------------------*/
if (*cclass != 'U') {
printf(" Verification being performed for cclass %1c\n", *cclass);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
/* a DT mismatch invalidates the whole comparison, so demote to 'U' */
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown cclass\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m,xcr[m],xcrref[m],xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m,xce[m],xceref[m],xcedif[m]);
}
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the x-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the x-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
int i, j, k, n, i1, i2, m;
double fac1, fac2;
/*--------------------------------------------------------------------
c FORWARD ELIMINATION
--------------------------------------------------------------------*/
lhsx();
/*--------------------------------------------------------------------
c perform the Thomas algorithm; first, FORWARD ELIMINATION
--------------------------------------------------------------------*/
/* n is the offset into lhs selecting the 5-coefficient band for the
   current factor: 0 = the system shared by components m = 0..2,
   5 = the u+c factor (m = 3), 10 = the u-c factor (m = 4) */
n = 0;
for (i = 0; i <= grid_points[0]-3; i++) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
/* fac1 = 1/pivot; normalize row i, then eliminate its coupling
   from the next two rows i1 and i2 */
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
}
lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i2][j][k] = rhs[m][i2][j][k] -
lhs[n+0][i2][j][k]*rhs[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
--------------------------------------------------------------------*/
i = grid_points[0]-2;
i1 = grid_points[0]-1;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1.0/lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
}
/*--------------------------------------------------------------------
c scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i1][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
}
}
}
/*--------------------------------------------------------------------
c do the u+c and the u-c factors
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
/* n = 5 for the u+c factor (m = 3), 10 for u-c (m = 4) */
n = (m-3+1)*5;
for (i = 0; i <= grid_points[0]-3; i++) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
lhs[n+1][i2][j][k] = lhs[n+1][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+3][i][j][k];
lhs[n+2][i2][j][k] = lhs[n+2][i2][j][k] -
lhs[n+0][i2][j][k]*lhs[n+4][i][j][k];
rhs[m][i2][j][k] = rhs[m][i2][j][k] -
lhs[n+0][i2][j][k]*rhs[m][i][j][k];
}
}
}
/*--------------------------------------------------------------------
c And again the last two rows separately
--------------------------------------------------------------------*/
i = grid_points[0]-2;
i1 = grid_points[0]-1;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i1][j][k] = lhs[n+2][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+3][i][j][k];
lhs[n+3][i1][j][k] = lhs[n+3][i1][j][k] -
lhs[n+1][i1][j][k]*lhs[n+4][i][j][k];
rhs[m][i1][j][k] = rhs[m][i1][j][k] -
lhs[n+1][i1][j][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i1][j][k];
rhs[m][i1][j][k] = fac2*rhs[m][i1][j][k];
}
}
}
/*--------------------------------------------------------------------
c BACKSUBSTITUTION
--------------------------------------------------------------------*/
i = grid_points[0]-2;
i1 = grid_points[0]-1;
n = 0;
/* components 0..2: remove the last super-diagonal entry of row i */
for (m = 0; m < 3; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k];
}
}
}
for (m = 3; m < 5; m++) {
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
n = (m-3+1)*5;
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k];
}
}
}
/*--------------------------------------------------------------------
c The first three factors
--------------------------------------------------------------------*/
n = 0;
for (i = grid_points[0]-3; i >= 0; i--) {
i1 = i + 1;
i2 = i + 2;
/* NOTE(review): here the omp for is applied to the m loop (only 3
   iterations), while the analogous y/z sweeps parallelize over a grid
   dimension -- confirm this is the intended schedule */
#pragma omp for
for (m = 0; m < 3; m++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
lhs[n+4][i][j][k]*rhs[m][i2][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c And the remaining two
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
for (i = grid_points[0]-3; i >= 0; i--) {
i1 = i + 1;
i2 = i + 2;
#pragma omp for
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i1][j][k] -
lhs[n+4][i][j][k]*rhs[m][i2][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c Do the block-diagonal inversion
--------------------------------------------------------------------*/
ninvr();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the y-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the y-lines. Boundary conditions are non-periodic
--------------------------------------------------------------------*/
int i, j, k, n, j1, j2, m;
double fac1, fac2;
/*--------------------------------------------------------------------
c FORWARD ELIMINATION
--------------------------------------------------------------------*/
lhsy();
/* n selects the lhs band: 0 = the system shared by components
   m = 0..2, 5 = the u+c factor, 10 = the u-c factor */
n = 0;
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
/* fac1 = 1/pivot; normalize row j, then eliminate its coupling
   from rows j1 and j2 */
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
}
lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j2][k] = rhs[m][i][j2][k] -
lhs[n+0][i][j2][k]*rhs[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
--------------------------------------------------------------------*/
j = grid_points[1]-2;
j1 = grid_points[1]-1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
}
/*--------------------------------------------------------------------
c scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i][j1][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
}
}
}
/*--------------------------------------------------------------------
c do the u+c and the u-c factors
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
/* n = 5 for u+c (m = 3), 10 for u-c (m = 4) */
n = (m-3+1)*5;
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
lhs[n+1][i][j2][k] = lhs[n+1][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+3][i][j][k];
lhs[n+2][i][j2][k] = lhs[n+2][i][j2][k] -
lhs[n+0][i][j2][k]*lhs[n+4][i][j][k];
rhs[m][i][j2][k] = rhs[m][i][j2][k] -
lhs[n+0][i][j2][k]*rhs[m][i][j][k];
}
}
}
/*--------------------------------------------------------------------
c And again the last two rows separately
--------------------------------------------------------------------*/
j = grid_points[1]-2;
j1 = grid_points[1]-1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i][j1][k] = lhs[n+2][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+3][i][j][k];
lhs[n+3][i][j1][k] = lhs[n+3][i][j1][k] -
lhs[n+1][i][j1][k]*lhs[n+4][i][j][k];
rhs[m][i][j1][k] = rhs[m][i][j1][k] -
lhs[n+1][i][j1][k]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately
--------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i][j1][k];
rhs[m][i][j1][k] = fac2*rhs[m][i][j1][k];
}
}
}
/*--------------------------------------------------------------------
c BACKSUBSTITUTION
--------------------------------------------------------------------*/
j = grid_points[1]-2;
j1 = grid_points[1]-1;
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k];
}
}
}
for (m = 3; m < 5; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
n = (m-3+1)*5;
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k];
}
}
}
/*--------------------------------------------------------------------
c The first three factors
--------------------------------------------------------------------*/
n = 0;
for (m = 0; m < 3; m++) {
for (j = grid_points[1]-3; j >= 0; j--) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
lhs[n+4][i][j][k]*rhs[m][i][j2][k];
}
}
}
}
/*--------------------------------------------------------------------
c And the remaining two
--------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
for (j = grid_points[1]-3; j >= 0; j--) {
j1 = j + 1;
/* equivalent to j + 2 */
j2 = j1 + 1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (k = 1; k <= grid_points[2]-2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j1][k] -
lhs[n+4][i][j][k]*rhs[m][i][j2][k];
}
}
}
}
/* block-diagonal inversion for the y factor */
pinvr();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function performs the solution of the approximate factorization
c step in the z-direction for all five matrix components
c simultaneously. The Thomas algorithm is employed to solve the
c systems for the z-lines. Boundary conditions are non-periodic
c-------------------------------------------------------------------*/
int i, j, k, n, k1, k2, m;
double fac1, fac2;
/*--------------------------------------------------------------------
c FORWARD ELIMINATION
c-------------------------------------------------------------------*/
lhsz();
/* n selects the lhs band: 0 = the system shared by components
   m = 0..2, 5 = the u+c factor, 10 = the u-c factor.
   Unlike x/y, the sequential Thomas sweep (over k) is the innermost
   loop here, so the whole sweep sits inside one omp for over i. */
n = 0;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 0; k <= grid_points[2]-3; k++) {
k1 = k + 1;
k2 = k + 2;
/* fac1 = 1/pivot; normalize row k, then eliminate its coupling
   from rows k1 and k2 */
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k1] = rhs[m][i][j][k1] -
lhs[n+1][i][j][k1]*rhs[m][i][j][k];
}
lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k2] = rhs[m][i][j][k2] -
lhs[n+0][i][j][k2]*rhs[m][i][j][k];
}
}
}
}
/*--------------------------------------------------------------------
c The last two rows in this grid block are a bit different,
c since they do not have two more rows available for the
c elimination of off-diagonal entries
c-------------------------------------------------------------------*/
k = grid_points[2]-2;
k1 = grid_points[2]-1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
}
lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k1] = rhs[m][i][j][k1] -
lhs[n+1][i][j][k1]*rhs[m][i][j][k];
}
/*--------------------------------------------------------------------
c scale the last row immediately
c-------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i][j][k1];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
}
}
}
/*--------------------------------------------------------------------
c do the u+c and the u-c factors
c-------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
/* n = 5 for u+c (m = 3), 10 for u-c (m = 4) */
n = (m-3+1)*5;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = 0; k <= grid_points[2]-3; k++) {
k1 = k + 1;
k2 = k + 2;
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
rhs[m][i][j][k1] = rhs[m][i][j][k1] -
lhs[n+1][i][j][k1]*rhs[m][i][j][k];
lhs[n+1][i][j][k2] = lhs[n+1][i][j][k2] -
lhs[n+0][i][j][k2]*lhs[n+3][i][j][k];
lhs[n+2][i][j][k2] = lhs[n+2][i][j][k2] -
lhs[n+0][i][j][k2]*lhs[n+4][i][j][k];
rhs[m][i][j][k2] = rhs[m][i][j][k2] -
lhs[n+0][i][j][k2]*rhs[m][i][j][k];
}
}
}
/*--------------------------------------------------------------------
c And again the last two rows separately
c-------------------------------------------------------------------*/
k = grid_points[2]-2;
k1 = grid_points[2]-1;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
fac1 = 1./lhs[n+2][i][j][k];
lhs[n+3][i][j][k] = fac1*lhs[n+3][i][j][k];
lhs[n+4][i][j][k] = fac1*lhs[n+4][i][j][k];
rhs[m][i][j][k] = fac1*rhs[m][i][j][k];
lhs[n+2][i][j][k1] = lhs[n+2][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+3][i][j][k];
lhs[n+3][i][j][k1] = lhs[n+3][i][j][k1] -
lhs[n+1][i][j][k1]*lhs[n+4][i][j][k];
rhs[m][i][j][k1] = rhs[m][i][j][k1] -
lhs[n+1][i][j][k1]*rhs[m][i][j][k];
/*--------------------------------------------------------------------
c Scale the last row immediately (some of this is overkill
c if this is the last cell)
c-------------------------------------------------------------------*/
fac2 = 1./lhs[n+2][i][j][k1];
rhs[m][i][j][k1] = fac2*rhs[m][i][j][k1];
}
}
}
/*--------------------------------------------------------------------
c BACKSUBSTITUTION
c-------------------------------------------------------------------*/
k = grid_points[2]-2;
k1 = grid_points[2]-1;
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j][k1];
}
}
}
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j][k1];
}
}
}
/*--------------------------------------------------------------------
c Whether or not this is the last processor, we always have
c to complete the back-substitution
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The first three factors
c-------------------------------------------------------------------*/
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = grid_points[2]-3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
lhs[n+4][i][j][k]*rhs[m][i][j][k2];
}
}
}
}
/*--------------------------------------------------------------------
c And the remaining two
c-------------------------------------------------------------------*/
for (m = 3; m < 5; m++) {
n = (m-3+1)*5;
#pragma omp for
for (i = 1; i <= grid_points[0]-2; i++) {
for (j = 1; j <= grid_points[1]-2; j++) {
for (k = grid_points[2]-3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
rhs[m][i][j][k] = rhs[m][i][j][k] -
lhs[n+3][i][j][k]*rhs[m][i][j][k1] -
lhs[n+4][i][j][k]*rhs[m][i][j][k2];
}
}
}
}
/* block-diagonal inversion for the z factor */
tzetar();
}
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
/*****************************************************************/
/* Print the standard NPB results summary for a completed run.   */
/* All fields are echoed verbatim; passed_verification selects   */
/* the SUCCESSFUL/UNSUCCESSFUL line.  Output goes to stdout.     */
/*****************************************************************/
void c_print_results( char *name,
                      char cclass,
                      int n1,
                      int n2,
                      int n3,
                      int niter,
                      int nthreads,
                      double t,
                      double mops,
                      char *optype,
                      int passed_verification,
                      char *npbversion,
                      char *compiletime,
                      char *cc,
                      char *clink,
                      char *c_lib,
                      char *c_inc,
                      char *cflags,
                      char *clinkflags,
                      char *rand)
{
    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", cclass );
    /* IS reports a 1-D size (n2 == n3 == 0); the others a 3-D grid */
    if( n2 == 0 && n3 == 0 )
        printf( " Size = %12d\n", n1 ); /* as in IS */
    else
        printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
    printf( " Iterations = %12d\n", niter );
    printf( " Threads = %12d\n", nthreads );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Operation type = %24s\n", optype);
    if( passed_verification )
        printf( " Verification = SUCCESSFUL\n" );
    else
        printf( " Verification = UNSUCCESSFUL\n" );
    printf( " Version = %12s\n", npbversion );
    printf( " Compile date = %12s\n", compiletime );
    printf( "\n Compile options:\n" );
    printf( " CC = %s\n", cc );
    printf( " CLINK = %s\n", clink );
    printf( " C_LIB = %s\n", c_lib );
    printf( " C_INC = %s\n", c_inc );
    printf( " CFLAGS = %s\n", cflags );
    printf( " CLINKFLAGS = %s\n", clinkflags );
    printf( " RAND = %s\n", rand );
#ifdef SMP
    {
        /* getenv returns NULL when the variable is unset; printing a
           NULL through "%s" is undefined behavior, so fall back to the
           historical "1000" default.  The variable is scoped here so
           non-SMP builds get no unused-variable warning. */
        char *evalue = getenv("MP_SET_NUMTHREADS");
        printf( " MULTICPUS = %s\n", evalue != NULL ? evalue : "1000" );
    }
#endif
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/*****************************************************************/
/* Return the current wall-clock time in seconds, as reported    */
/* by wtime().                                                   */
/*****************************************************************/
double elapsed_time( void )
{
    double now;
    wtime( &now );
    return now;
}
/* per-timer state (up to 64 timers): start[n] holds the wall-clock
   stamp of the most recent timer_start(n); elapsed[n] accumulates the
   total of the stopped intervals for timer n */
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/*****************************************************************/
/* Reset the accumulated elapsed time of timer n to zero.        */
/*****************************************************************/
void timer_clear( int n )
{
    elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/*****************************************************************/
/* Record the current wall-clock time as the start of an         */
/* interval for timer n.                                         */
/*****************************************************************/
void timer_start( int n )
{
    start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/*****************************************************************/
/* Close the interval opened by the matching timer_start(n) and  */
/* add its length to the accumulated elapsed time of timer n.    */
/*****************************************************************/
void timer_stop( int n )
{
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/*****************************************************************/
/* Return the total elapsed time accumulated for timer n, in     */
/* seconds.                                                      */
/*****************************************************************/
double timer_read( int n )
{
    return elapsed[n];
}
/* Store into *t the wall-clock time, in seconds with microsecond
   resolution, measured relative to the seconds value of the first
   call (keeps the double small and precise).
   Fix: the epoch base is kept as time_t rather than int -- storing
   tv_sec into an int is a narrowing conversion that truncates on
   platforms where time_t is 64-bit (and breaks after 2038). */
void wtime(double *t)
{
    static time_t base_sec = -1;
    struct timeval tv;
    gettimeofday(&tv, (void *)0);
    if (base_sec < 0) base_sec = tv.tv_sec;      /* first call fixes the base */
    *t = (double)(tv.tv_sec - base_sec) + 1.0e-6*tv.tv_usec;
}
// common/c_randdp.c
/*
*/
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/* NPB linear congruential generator: advance seed *x by one step of
   x_{k+1} = a*x_k (mod 2^46) using double-double arithmetic, and
   return the result normalized to (0,1).  Bit-exact NPB reference
   code: do not reorder the arithmetic. */
double randlc (double *x, double a) {
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*c---------------------------------------------------------------------
c
c This routine returns a uniform pseudorandom double precision number in the
c range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The returned value RANDLC is normalized to be
c between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
c the new seed x_1, so that subsequent calls to RANDLC using the same
c arguments will generate a continuous sequence.
c
c This routine should produce the same results on any computer with at least
c 48 mantissa bits in double precision floating point data. On 64 bit
c systems, double precision should be disabled.
c
c David H. Bailey October 26, 1990
c
c---------------------------------------------------------------------*/
double t1,t2,t3,t4,a1,a2,x1,x2,z;
/*c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 * A1 + A2.
c---------------------------------------------------------------------*/
/* The (int) casts below truncate toward zero; since all operands are
   nonnegative this acts as floor().  Valid seeds (< 2^46) make the
   truncated values < 2^23, so they fit in an int. */
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
/*c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 * X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------*/
t1 = r23 * (*x);
x1 = (int)t1;
x2 = (*x) - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
(*x) = t3 - t46 * t4;
return (r46 * (*x));
}
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/* Vector form of randlc: generate n pseudorandom numbers into y and
   update *x_seed to the new seed.  Bit-exact NPB reference code.
   NOTE(review): y is filled at indices 1..n (Fortran-style); the
   caller must size/offset y so that y[1..n] is valid — confirm at
   call sites. */
void vranlc (int n, double *x_seed, double a, double* y) {
/* void vranlc (int n, double *x_seed, double a, double y[]) { */
/*c---------------------------------------------------------------------
c---------------------------------------------------------------------*/
/*c---------------------------------------------------------------------
c
c This routine generates N uniform pseudorandom double precision numbers in
c the range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The N results are placed in Y and are normalized
c to be between 0 and 1. X is updated to contain the new seed, so that
c subsequent calls to VRANLC using the same arguments will generate a
c continuous sequence. If N is zero, only initialization is performed, and
c the variables X, A and Y are ignored.
c
c This routine is the standard version designed for scalar or RISC systems.
c However, it should produce the same results on any single processor
c computer with at least 48 mantissa bits in double precision floating point
c data. On 64 bit systems, double precision should be disabled.
c
c---------------------------------------------------------------------*/
int i;
double x,t1,t2,t3,t4,a1,a2,x1,x2,z;
/*c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 * A1 + A2.
c---------------------------------------------------------------------*/
t1 = r23 * a;
a1 = (int)t1;
a2 = a - t23 * a1;
x = *x_seed;
/*c---------------------------------------------------------------------
c Generate N results. This loop is not vectorizable.
c---------------------------------------------------------------------*/
for (i = 1; i <= n; i++) {
/*c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 * X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------*/
/* (int) casts truncate nonnegative values, i.e. act as floor(). */
t1 = r23 * x;
x1 = (int)t1;
x2 = x - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int)(r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int)(r46 * t3);
x = t3 - t46 * t4;
y[i] = r46 * x;
}
*x_seed = x;
}
|
TwoCore.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*
 * Spawn two concurrent gcc compilations (one per OpenMP thread) of
 * testFile0.c and testFile1.c.
 *
 * Fix: the return value of both system() calls was ignored and the
 * program always exited 0, even when a compile failed or no shell
 * was available.  Each thread now checks its command's status, and
 * the process exit code reflects any failure.
 */
int main(int argc, char *argv[]){
    (void)argc;
    (void)argv;
    int status = 0;                    /* nonzero if any command failed */
    omp_set_num_threads(2);
#pragma omp parallel reduction(|:status)
    {
        /* thread 1 builds test0, thread 0 builds test1 (as before) */
        const char *cmd = omp_get_thread_num()
            ? "gcc testFile0.c -o test0"
            : "gcc testFile1.c -o test1";
        int rc = system(cmd);
        if (rc != 0) {
            fprintf(stderr, "command failed (status %d): %s\n", rc, cmd);
            status = 1;
        }
    }
    return status;
}
GB_binop__rminus_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_01__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_03__rminus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fc32)
// A*D function (colscale): GB (_AxD__rminus_fc32)
// D*A function (rowscale): GB (_DxB__rminus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fc32)
// C=scalar+B GB (_bind1st__rminus_fc32)
// C=scalar+B' GB (_bind1st_tran__rminus_fc32)
// C=A+scalar GB (_bind2nd__rminus_fc32)
// C=A'+scalar GB (_bind2nd_tran__rminus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_minus (bij, aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC32_minus (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FC32 || GxB_NO_RMINUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense.  The template expands
// GB_BINOP, i.e. z = GB_FC32_minus (y, x) (rminus: second minus first).
void GB (_Cdense_ewise3_accum__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Same as above but C is overwritten, not accumulated.  Returns
// GrB_NO_VALUE when this operator is compiled out via GB_DISABLE,
// in which case the caller falls back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; work is pre-sliced into
// B_ntasks tasks (B_ek_slicing) over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points to the scalar b, passed type-erased as GB_void.
GrB_Info GB (_Cdense_accumb__rminus_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns first.
// Harmless generator artifact (the twin return is live in variants
// where the block is compiled out with #if 0).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// applying the rminus operator via the colscale template.
GrB_Info GB (_AxD__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M): union of the patterns of
// A and B, applying rminus where both entries are present.  Tasks are
// precomputed in TaskList; C_to_M/A/B map C's vectors to M, A, B.
GrB_Info GB (_AaddB__rminus_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scratch slicings allocated (if needed) inside the template,
// and released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked): intersection of the patterns
// of A and B, applying rminus to matching entries.
GrB_Info GB (_AemultB_01__rminus_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// flipxy requests f(y,x) instead of f(x,y); GB_BINOP_FLIP is 0 for
// rminus (the flip was already resolved to minus upstream), so only
// the un-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; iteration is driven by the mask M.
GrB_Info GB (_AemultB_03__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-result variant of eWiseMult; the mask (if any) may be
// complemented (Mask_comp) and/or structural (Mask_struct).
GrB_Info GB (_AemultB_bitmap__rminus_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = rminus (x, Bx): bind the scalar as the first operand, so each
// output is bij - x (rminus(x,y) = y - x).  Bb is B's bitmap (may be
// NULL for full matrices); entries absent from Bb are skipped.
// NOTE(review): the count parameter is named anz but counts entries of
// B — confirm against callers.
GrB_Info GB (_bind1st__rminus_fc32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = Bx [p] ;
Cx [p] = GB_FC32_minus (bij, x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = rminus (Ax, y): bind the scalar as the second operand, so each
// output is y - aij.
GrB_Info GB (_bind2nd__rminus_fc32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = Ax [p] ;
Cx [p] = GB_FC32_minus (y, aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: here it computes
// rminus(x, aij) = aij - x for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_minus (aij, x) ; \
}
// C = rminus (x, A'): transpose A while applying the operator with the
// scalar bound as the first operand.
GrB_Info GB (_bind1st_tran__rminus_fc32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Here GB_CAST_OP computes rminus(aij, y) = y - aij.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_minus (y, aij) ; \
}
// C = rminus (A', y): transpose A while applying the operator with the
// scalar bound as the second operand.
GrB_Info GB (_bind2nd_tran__rminus_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__first_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__first_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint8)
// A*D function (colscale): GB (_AxD__first_uint8)
// D*A function (rowscale): GB (_DxB__first_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__first_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__first_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_UINT8 || GxB_NO_FIRST_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all dense, with the FIRST operator: each cij = aij, so B's
// values are never read (B is pattern-only in this kernel).
void GB (_Cdense_ewise3_noaccum__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The accumulation body is compiled out (#if 0): FIRST is not a valid
// dense-accum operator, so this stub just reports success.
GrB_Info GB (_Cdense_accumB__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: body compiled out (#if 0) for the FIRST operator; stub only.
GrB_Info GB (_Cdense_accumb__first_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D with FIRST: cij = aij, so D's values are ignored.
GrB_Info GB (_AxD__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B with FIRST: cij = dii (the first operand).
GrB_Info GB (_DxB__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked) with the FIRST operator.  The
// alpha/beta scalars are only read for GxB_eWiseUnion (is_eWiseUnion),
// where they substitute for missing entries of A and B respectively.
GrB_Info GB (_AaddB__first_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scratch slicings, released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): pattern intersection of A and B, cij = aij.
GrB_Info GB (_AemultB_08__first_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP
// is 0 for FIRST, so only the un-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): iteration is driven by the sparse mask M.
GrB_Info GB (_AemultB_04__first_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-result variant of eWiseMult with the FIRST operator.
GrB_Info GB (_AemultB_bitmap__first_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Disabled kernel (dead code): compiled out via "#if 0"; for FIRST with y
// bound as the 2nd operand, cij = aij, so this reduces to a plain transpose.
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input, // scalar bound as the 2nd operand (unused by FIRST)
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
// the transpose template does all the work; GB_CAST_OP above copies aij to C
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
b4v7ld.c | /**** BSIM4.7.0 Released by Darsen Lu 04/08/2011 ****/
/**** OpenMP support ngspice 06/28/2010 ****/
/**********
* Copyright 2006 Regents of the University of California. All rights reserved.
* File: b4ld.c of BSIM4.7.0.
* Author: 2000 Weidong Liu
* Authors: 2001- Xuemei Xi, Mohan Dunga, Ali Niknejad, Chenming Hu.
* Authors: 2006- Mohan Dunga, Ali Niknejad, Chenming Hu
* Authors: 2007- Mohan Dunga, Wenwei Yang, Ali Niknejad, Chenming Hu
* Authors: 2008- Wenwei Yang, Ali Niknejad, Chenming Hu
* Project Director: Prof. Chenming Hu.
* Modified by Xuemei Xi, 04/06/2001.
* Modified by Xuemei Xi, 10/05/2001.
* Modified by Xuemei Xi, 11/15/2002.
* Modified by Xuemei Xi, 05/09/2003.
* Modified by Xuemei Xi, 03/04/2004.
* Modified by Xuemei Xi, Mohan Dunga, 07/29/2005.
* Modified by Mohan Dunga, 12/13/2006.
* Modified by Mohan Dunga, Wenwei Yang, 05/18/2007.
* Modified by Wenwei Yang, 07/31/2008.
* Modified by Tanvir Morshed, Darsen Lu 03/27/2011
**********/
#include "ngspice/ngspice.h"
#include "ngspice/cktdefs.h"
#include "bsim4v7def.h"
#include "ngspice/trandefs.h"
#include "ngspice/const.h"
#include "ngspice/sperror.h"
#include "ngspice/devdefs.h"
#include "ngspice/suffix.h"
#define MAX_EXPL 2.688117142e+43
#define MIN_EXPL 3.720075976e-44
#define EXPL_THRESHOLD 100.0
#define MAX_EXP 5.834617425e14
#define MIN_EXP 1.713908431e-15
#define EXP_THRESHOLD 34.0
#define EPS0 8.85418e-12
#define EPSSI 1.03594e-10
#define Charge_q 1.60219e-19
#define DELTA_1 0.02
#define DELTA_2 0.02
#define DELTA_3 0.02
#define DELTA_4 0.02
#define MM 3 /* smooth coeff */
/* DEXP(A,B,C): overflow-guarded exponential used throughout the BSIM4 model
 * equations.  On return B holds a safe approximation of exp(A) and C holds
 * its derivative d(B)/dA:
 *   A >  EXP_THRESHOLD : linear extrapolation, B = MAX_EXP*(1+A-thr), C = MAX_EXP
 *   A < -EXP_THRESHOLD : clamp to MIN_EXP with zero slope
 *   otherwise          : B = C = exp(A)
 * Wrapped in do { } while (0) so the macro expands to a single statement and
 * is safe as the body of an if/else (the old bare-brace form was not).
 * NOTE: A is evaluated more than once — pass only side-effect-free arguments,
 * as all existing call sites (plain variables such as T0) already do. */
#define DEXP(A,B,C) do { \
    if ((A) > EXP_THRESHOLD) { \
        (B) = MAX_EXP*(1.0+(A)-EXP_THRESHOLD); \
        (C) = MAX_EXP; \
    } else if ((A) < -EXP_THRESHOLD) { \
        (B) = MIN_EXP; \
        (C) = 0; \
    } else { \
        (B) = exp(A); \
        (C) = (B); \
    } \
} while (0)
#ifdef USE_OMP
/* Per-instance evaluation (run in parallel) and the serial RHS/matrix
 * scatter pass that follows it; see their definitions below. */
int BSIM4v7LoadOMP(BSIM4v7instance *here, CKTcircuit *ckt);
void BSIM4v7LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt);
#endif
/* Poly-gate depletion correction: computes the effective Vgs and its
 * derivative with respect to the applied Vgs. */
int BSIM4v7polyDepletion(double phi, double ngate,double epsgate, double coxe, double Vgs, double *Vgs_eff, double *dVgs_eff_dVg);
int
BSIM4v7load(
GENmodel *inModel,
CKTcircuit *ckt)
{
#ifdef USE_OMP
int idx;
BSIM4v7model *model = (BSIM4v7model*)inModel;
int error = 0;
BSIM4v7instance **InstArray;
InstArray = model->BSIM4v7InstanceArray;
#pragma omp parallel for
for (idx = 0; idx < model->BSIM4v7InstCount; idx++) {
BSIM4v7instance *here = InstArray[idx];
int local_error = BSIM4v7LoadOMP(here, ckt);
if (local_error)
error = local_error;
}
BSIM4v7LoadRhsMat(inModel, ckt);
return error;
}
int BSIM4v7LoadOMP(BSIM4v7instance *here, CKTcircuit *ckt) {
BSIM4v7model *model = BSIM4v7modPtr(here);
#else
BSIM4v7model *model = (BSIM4v7model*)inModel;
BSIM4v7instance *here;
#endif
double ceqgstot, dgstot_dvd, dgstot_dvg, dgstot_dvs, dgstot_dvb;
double ceqgdtot, dgdtot_dvd, dgdtot_dvg, dgdtot_dvs, dgdtot_dvb;
double gstot, gstotd, gstotg, gstots, gstotb, gspr, Rs, Rd;
double gdtot, gdtotd, gdtotg, gdtots, gdtotb, gdpr;
double vgs_eff, vgd_eff, dvgs_eff_dvg, dvgd_eff_dvg;
double dRs_dvg, dRd_dvg, dRs_dvb, dRd_dvb;
double dT0_dvg, dT1_dvb, dT3_dvg, dT3_dvb;
double vses, vdes, vdedo, delvses, delvded, delvdes;
double Isestot, cseshat, Idedtot, cdedhat;
#ifndef NEWCONV
double tol0, tol1, tol2, tol3, tol4, tol5, tol6;
#endif
double geltd, gcrg, gcrgg, gcrgd, gcrgs, gcrgb, ceqgcrg;
double vges, vgms, vgedo, vgmdo, vged, vgmd, delvged, delvgmd;
double delvges, delvgms, vgmb;
double gcgmgmb=0.0, gcgmdb=0.0, gcgmsb=0.0, gcdgmb, gcsgmb;
double gcgmbb=0.0, gcbgmb, qgmb, qgmid=0.0, ceqqgmid;
double vbd, vbs, vds, vgb, vgd, vgs, vgdo;
#ifndef PREDICTOR
double xfact;
#endif
double vdbs, vdbd, vsbs, vsbdo, vsbd;
double delvdbs, delvdbd, delvsbs;
double delvbd_jct, delvbs_jct, vbs_jct, vbd_jct;
double SourceSatCurrent, DrainSatCurrent;
double ag0, qgb, von, cbhat, VgstNVt, ExpVgst;
double ceqqb, ceqqd, ceqqg, ceqqjd=0.0, ceqqjs=0.0, ceq, geq;
double cdrain, cdhat, ceqdrn, ceqbd, ceqbs, ceqjd, ceqjs, gjbd, gjbs;
double czbd, czbdsw, czbdswg, czbs, czbssw, czbsswg, evbd, evbs, arg, sarg;
double delvbd, delvbs, delvds, delvgd, delvgs;
double Vfbeff, dVfbeff_dVg, dVfbeff_dVb, V3, V4;
double gcbdb, gcbgb, gcbsb, gcddb, gcdgb, gcdsb, gcgdb, gcggb, gcgsb, gcsdb;
double gcgbb, gcdbb, gcsbb, gcbbb;
double gcdbdb, gcsbsb;
double gcsgb, gcssb, MJD, MJSWD, MJSWGD, MJS, MJSWS, MJSWGS;
double qgate=0.0, qbulk=0.0, qdrn=0.0, qsrc, cqgate, cqbody, cqdrn;
double Vdb, Vds, Vgs, Vbs, Gmbs, FwdSum, RevSum;
double Igidl, Ggidld, Ggidlg, Ggidlb;
double Voxacc=0.0, dVoxacc_dVg=0.0, dVoxacc_dVb=0.0;
double Voxdepinv=0.0, dVoxdepinv_dVg=0.0, dVoxdepinv_dVd=0.0, dVoxdepinv_dVb=0.0;
double VxNVt=0.0, ExpVxNVt, Vaux=0.0, dVaux_dVg=0.0, dVaux_dVd=0.0, dVaux_dVb=0.0;
double Igc, dIgc_dVg, dIgc_dVd, dIgc_dVb;
double Igcs, dIgcs_dVg, dIgcs_dVd, dIgcs_dVb;
double Igcd, dIgcd_dVg, dIgcd_dVd, dIgcd_dVb;
double Igs, dIgs_dVg, dIgs_dVs, Igd, dIgd_dVg, dIgd_dVd;
double Igbacc, dIgbacc_dVg, dIgbacc_dVb;
double Igbinv, dIgbinv_dVg, dIgbinv_dVd, dIgbinv_dVb;
double Pigcd, dPigcd_dVg, dPigcd_dVd, dPigcd_dVb;
double Istoteq, gIstotg, gIstotd, gIstots, gIstotb;
double Idtoteq, gIdtotg, gIdtotd, gIdtots, gIdtotb;
double Ibtoteq, gIbtotg, gIbtotd, gIbtots, gIbtotb;
double Igtoteq, gIgtotg, gIgtotd, gIgtots, gIgtotb;
double Igstot, cgshat, Igdtot, cgdhat, Igbtot, cgbhat;
double Vgs_eff, Vfb=0.0, Vth_NarrowW;
/* double Vgd_eff, dVgd_eff_dVg; v4.7.0 */
double Phis, dPhis_dVb, sqrtPhis, dsqrtPhis_dVb, Vth, dVth_dVb, dVth_dVd;
double Vgst, dVgst_dVg, dVgst_dVb, dVgs_eff_dVg, Nvtms, Nvtmd;
double Vtm, Vtm0;
double n, dn_dVb, dn_dVd, voffcv, noff, dnoff_dVd, dnoff_dVb;
double V0, CoxWLcen, QovCox, LINK;
double DeltaPhi, dDeltaPhi_dVg, VgDP, dVgDP_dVg;
double Cox, Tox, Tcen, dTcen_dVg, dTcen_dVd, dTcen_dVb;
double Ccen, Coxeff, dCoxeff_dVd, dCoxeff_dVg, dCoxeff_dVb;
double Denomi, dDenomi_dVg, dDenomi_dVd, dDenomi_dVb;
double ueff, dueff_dVg, dueff_dVd, dueff_dVb;
double Esat, Vdsat;
double EsatL, dEsatL_dVg, dEsatL_dVd, dEsatL_dVb;
double dVdsat_dVg, dVdsat_dVb, dVdsat_dVd, Vasat, dAlphaz_dVg, dAlphaz_dVb;
double dVasat_dVg, dVasat_dVb, dVasat_dVd, Va, dVa_dVd, dVa_dVg, dVa_dVb;
double Vbseff, dVbseff_dVb, VbseffCV, dVbseffCV_dVb;
double VgsteffVth, dT11_dVg;
double Arg1, One_Third_CoxWL, Two_Third_CoxWL, Alphaz, CoxWL;
double T0=0.0, dT0_dVg, dT0_dVd, dT0_dVb;
double T1, dT1_dVg, dT1_dVd, dT1_dVb;
double T2, dT2_dVg, dT2_dVd, dT2_dVb;
double T3, dT3_dVg, dT3_dVd, dT3_dVb;
double T4, dT4_dVd, dT4_dVb;
double T5, dT5_dVg, dT5_dVd, dT5_dVb;
double T6, dT6_dVg, dT6_dVd, dT6_dVb;
double T7, dT7_dVg, dT7_dVd, dT7_dVb;
double T8, dT8_dVg, dT8_dVd, dT8_dVb;
double T9, dT9_dVg, dT9_dVd, dT9_dVb;
double T10, dT10_dVg, dT10_dVb, dT10_dVd;
double T11, T12, T13, T14;
double tmp, Abulk, dAbulk_dVb, Abulk0, dAbulk0_dVb;
double Cclm, dCclm_dVg, dCclm_dVd, dCclm_dVb;
double FP, dFP_dVg, PvagTerm, dPvagTerm_dVg, dPvagTerm_dVd, dPvagTerm_dVb;
double VADITS, dVADITS_dVg, dVADITS_dVd;
double Lpe_Vb, dDITS_Sft_dVb, dDITS_Sft_dVd;
double DITS_Sft2, dDITS_Sft2_dVd; /* v4.7 New DITS */
double VACLM, dVACLM_dVg, dVACLM_dVd, dVACLM_dVb;
double VADIBL, dVADIBL_dVg, dVADIBL_dVd, dVADIBL_dVb;
double Xdep, dXdep_dVb, lt1, dlt1_dVb, ltw, dltw_dVb, Delt_vth, dDelt_vth_dVb;
double Theta0, dTheta0_dVb;
double TempRatio, tmp1, tmp2, tmp3, tmp4;
double DIBL_Sft, dDIBL_Sft_dVd, Lambda, dLambda_dVg;
double Idtot, Ibtot, a1, ScalingFactor;
double Vgsteff, dVgsteff_dVg, dVgsteff_dVd, dVgsteff_dVb;
double Vdseff, dVdseff_dVg, dVdseff_dVd, dVdseff_dVb;
double VdseffCV, dVdseffCV_dVg, dVdseffCV_dVd, dVdseffCV_dVb;
double diffVds, dAbulk_dVg;
double beta, dbeta_dVg, dbeta_dVd, dbeta_dVb;
double gche, dgche_dVg, dgche_dVd, dgche_dVb;
double fgche1, dfgche1_dVg, dfgche1_dVd, dfgche1_dVb;
double fgche2, dfgche2_dVg, dfgche2_dVd, dfgche2_dVb;
double Idl, dIdl_dVg, dIdl_dVd, dIdl_dVb;
double Idsa, dIdsa_dVg, dIdsa_dVd, dIdsa_dVb;
double Ids, Gm, Gds, Gmb, devbs_dvb, devbd_dvb;
double Isub, Gbd, Gbg, Gbb;
double VASCBE, dVASCBE_dVg, dVASCBE_dVd, dVASCBE_dVb;
double CoxeffWovL;
double Rds, dRds_dVg, dRds_dVb, WVCox, WVCoxRds;
double Vgst2Vtm, VdsatCV;
double Leff, Weff, dWeff_dVg, dWeff_dVb;
double AbulkCV, dAbulkCV_dVb;
double qcheq, qdef, gqdef=0.0, cqdef=0.0, cqcheq=0.0;
double gcqdb=0.0, gcqsb=0.0, gcqgb=0.0, gcqbb=0.0;
double dxpart, sxpart, ggtg, ggtd, ggts, ggtb;
double ddxpart_dVd, ddxpart_dVg, ddxpart_dVb, ddxpart_dVs;
double dsxpart_dVd, dsxpart_dVg, dsxpart_dVb, dsxpart_dVs;
double gbspsp, gbbdp, gbbsp, gbspg, gbspb, gbspdp;
double gbdpdp, gbdpg, gbdpb, gbdpsp;
double qgdo, qgso, cgdo, cgso;
double Cgg, Cgd, Cgb, Cdg, Cdd, Cds;
double Csg, Csd, Css, Csb, Cbg, Cbd, Cbb;
double Cgg1, Cgd1, Cgb1, Cbg1, Cbb1, Cbd1, Qac0, Qsub0;
double dQac0_dVg, dQac0_dVb, dQsub0_dVg, dQsub0_dVd, dQsub0_dVb;
double ggidld, ggidlg, ggidlb, ggislg, ggislb, ggisls;
double Igisl, Ggislg, Ggislb, Ggisls;
double Nvtmrss, Nvtmrssws, Nvtmrsswgs;
double Nvtmrsd, Nvtmrsswd, Nvtmrsswgd;
double vs, Fsevl, dvs_dVg, dvs_dVd, dvs_dVb, dFsevl_dVg, dFsevl_dVd, dFsevl_dVb;
double vgdx, vgsx, epssub, toxe, epsrox;
struct bsim4SizeDependParam *pParam;
int ByPass, ChargeComputationNeeded, error, Check, Check1, Check2;
double m;
ScalingFactor = 1.0e-9;
ChargeComputationNeeded =
((ckt->CKTmode & (MODEDCTRANCURVE | MODEAC | MODETRAN | MODEINITSMSIG)) ||
((ckt->CKTmode & MODETRANOP) && (ckt->CKTmode & MODEUIC)))
? 1 : 0;
#ifndef USE_OMP
for (; model != NULL; model = BSIM4v7nextModel(model))
{ for (here = BSIM4v7instances(model); here != NULL;
here = BSIM4v7nextInstance(here))
{
#endif
Check = Check1 = Check2 = 1;
ByPass = 0;
pParam = here->pParam;
if ((ckt->CKTmode & MODEINITSMSIG))
{ vds = *(ckt->CKTstate0 + here->BSIM4v7vds);
vgs = *(ckt->CKTstate0 + here->BSIM4v7vgs);
vbs = *(ckt->CKTstate0 + here->BSIM4v7vbs);
vges = *(ckt->CKTstate0 + here->BSIM4v7vges);
vgms = *(ckt->CKTstate0 + here->BSIM4v7vgms);
vdbs = *(ckt->CKTstate0 + here->BSIM4v7vdbs);
vsbs = *(ckt->CKTstate0 + here->BSIM4v7vsbs);
vses = *(ckt->CKTstate0 + here->BSIM4v7vses);
vdes = *(ckt->CKTstate0 + here->BSIM4v7vdes);
qdef = *(ckt->CKTstate0 + here->BSIM4v7qdef);
}
else if ((ckt->CKTmode & MODEINITTRAN))
{ vds = *(ckt->CKTstate1 + here->BSIM4v7vds);
vgs = *(ckt->CKTstate1 + here->BSIM4v7vgs);
vbs = *(ckt->CKTstate1 + here->BSIM4v7vbs);
vges = *(ckt->CKTstate1 + here->BSIM4v7vges);
vgms = *(ckt->CKTstate1 + here->BSIM4v7vgms);
vdbs = *(ckt->CKTstate1 + here->BSIM4v7vdbs);
vsbs = *(ckt->CKTstate1 + here->BSIM4v7vsbs);
vses = *(ckt->CKTstate1 + here->BSIM4v7vses);
vdes = *(ckt->CKTstate1 + here->BSIM4v7vdes);
qdef = *(ckt->CKTstate1 + here->BSIM4v7qdef);
}
else if ((ckt->CKTmode & MODEINITJCT) && !here->BSIM4v7off)
{ vds = model->BSIM4v7type * here->BSIM4v7icVDS;
vgs = vges = vgms = model->BSIM4v7type * here->BSIM4v7icVGS;
vbs = vdbs = vsbs = model->BSIM4v7type * here->BSIM4v7icVBS;
if (vds > 0.0)
{ vdes = vds + 0.01;
vses = -0.01;
}
else if (vds < 0.0)
{ vdes = vds - 0.01;
vses = 0.01;
}
else
vdes = vses = 0.0;
qdef = 0.0;
if ((vds == 0.0) && (vgs == 0.0) && (vbs == 0.0) &&
((ckt->CKTmode & (MODETRAN | MODEAC|MODEDCOP |
MODEDCTRANCURVE)) || (!(ckt->CKTmode & MODEUIC))))
{ vds = 0.1;
vdes = 0.11;
vses = -0.01;
vgs = vges = vgms = model->BSIM4v7type
* here->BSIM4v7vth0 + 0.1;
vbs = vdbs = vsbs = 0.0;
}
}
else if ((ckt->CKTmode & (MODEINITJCT | MODEINITFIX)) &&
(here->BSIM4v7off))
{ vds = vgs = vbs = vges = vgms = 0.0;
vdbs = vsbs = vdes = vses = qdef = 0.0;
}
else
{
#ifndef PREDICTOR
if ((ckt->CKTmode & MODEINITPRED))
{ xfact = ckt->CKTdelta / ckt->CKTdeltaOld[1];
*(ckt->CKTstate0 + here->BSIM4v7vds) =
*(ckt->CKTstate1 + here->BSIM4v7vds);
vds = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vds))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vds)));
*(ckt->CKTstate0 + here->BSIM4v7vgs) =
*(ckt->CKTstate1 + here->BSIM4v7vgs);
vgs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vgs))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vgs)));
*(ckt->CKTstate0 + here->BSIM4v7vges) =
*(ckt->CKTstate1 + here->BSIM4v7vges);
vges = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vges))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vges)));
*(ckt->CKTstate0 + here->BSIM4v7vgms) =
*(ckt->CKTstate1 + here->BSIM4v7vgms);
vgms = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vgms))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vgms)));
*(ckt->CKTstate0 + here->BSIM4v7vbs) =
*(ckt->CKTstate1 + here->BSIM4v7vbs);
vbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vbs))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vbs)));
*(ckt->CKTstate0 + here->BSIM4v7vbd) =
*(ckt->CKTstate0 + here->BSIM4v7vbs)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
*(ckt->CKTstate0 + here->BSIM4v7vdbs) =
*(ckt->CKTstate1 + here->BSIM4v7vdbs);
vdbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vdbs))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vdbs)));
*(ckt->CKTstate0 + here->BSIM4v7vdbd) =
*(ckt->CKTstate0 + here->BSIM4v7vdbs)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
*(ckt->CKTstate0 + here->BSIM4v7vsbs) =
*(ckt->CKTstate1 + here->BSIM4v7vsbs);
vsbs = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vsbs))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vsbs)));
*(ckt->CKTstate0 + here->BSIM4v7vses) =
*(ckt->CKTstate1 + here->BSIM4v7vses);
vses = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vses))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vses)));
*(ckt->CKTstate0 + here->BSIM4v7vdes) =
*(ckt->CKTstate1 + here->BSIM4v7vdes);
vdes = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7vdes))
- (xfact * (*(ckt->CKTstate2 + here->BSIM4v7vdes)));
*(ckt->CKTstate0 + here->BSIM4v7qdef) =
*(ckt->CKTstate1 + here->BSIM4v7qdef);
qdef = (1.0 + xfact)* (*(ckt->CKTstate1 + here->BSIM4v7qdef))
-(xfact * (*(ckt->CKTstate2 + here->BSIM4v7qdef)));
}
else
{
#endif /* PREDICTOR */
vds = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7dNodePrime)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vgs = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7gNodePrime)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vbs = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7bNodePrime)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vges = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7gNodeExt)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vgms = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7gNodeMid)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vdbs = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7dbNode)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vsbs = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7sbNode)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vses = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7sNode)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
vdes = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7dNode)
- *(ckt->CKTrhsOld + here->BSIM4v7sNodePrime));
qdef = model->BSIM4v7type
* (*(ckt->CKTrhsOld + here->BSIM4v7qNode));
#ifndef PREDICTOR
}
#endif /* PREDICTOR */
vgdo = *(ckt->CKTstate0 + here->BSIM4v7vgs)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
vgedo = *(ckt->CKTstate0 + here->BSIM4v7vges)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
vgmdo = *(ckt->CKTstate0 + here->BSIM4v7vgms)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
vbd = vbs - vds;
vdbd = vdbs - vds;
vgd = vgs - vds;
vged = vges - vds;
vgmd = vgms - vds;
delvbd = vbd - *(ckt->CKTstate0 + here->BSIM4v7vbd);
delvdbd = vdbd - *(ckt->CKTstate0 + here->BSIM4v7vdbd);
delvgd = vgd - vgdo;
delvged = vged - vgedo;
delvgmd = vgmd - vgmdo;
delvds = vds - *(ckt->CKTstate0 + here->BSIM4v7vds);
delvgs = vgs - *(ckt->CKTstate0 + here->BSIM4v7vgs);
delvges = vges - *(ckt->CKTstate0 + here->BSIM4v7vges);
delvgms = vgms - *(ckt->CKTstate0 + here->BSIM4v7vgms);
delvbs = vbs - *(ckt->CKTstate0 + here->BSIM4v7vbs);
delvdbs = vdbs - *(ckt->CKTstate0 + here->BSIM4v7vdbs);
delvsbs = vsbs - *(ckt->CKTstate0 + here->BSIM4v7vsbs);
delvses = vses - (*(ckt->CKTstate0 + here->BSIM4v7vses));
vdedo = *(ckt->CKTstate0 + here->BSIM4v7vdes)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
delvdes = vdes - *(ckt->CKTstate0 + here->BSIM4v7vdes);
delvded = vdes - vds - vdedo;
delvbd_jct = (!here->BSIM4v7rbodyMod) ? delvbd : delvdbd;
delvbs_jct = (!here->BSIM4v7rbodyMod) ? delvbs : delvsbs;
if (here->BSIM4v7mode >= 0)
{ Idtot = here->BSIM4v7cd + here->BSIM4v7csub - here->BSIM4v7cbd
+ here->BSIM4v7Igidl;
cdhat = Idtot - here->BSIM4v7gbd * delvbd_jct
+ (here->BSIM4v7gmbs + here->BSIM4v7gbbs + here->BSIM4v7ggidlb) * delvbs
+ (here->BSIM4v7gm + here->BSIM4v7gbgs + here->BSIM4v7ggidlg) * delvgs
+ (here->BSIM4v7gds + here->BSIM4v7gbds + here->BSIM4v7ggidld) * delvds;
Ibtot = here->BSIM4v7cbs + here->BSIM4v7cbd
- here->BSIM4v7Igidl - here->BSIM4v7Igisl - here->BSIM4v7csub;
cbhat = Ibtot + here->BSIM4v7gbd * delvbd_jct
+ here->BSIM4v7gbs * delvbs_jct - (here->BSIM4v7gbbs + here->BSIM4v7ggidlb)
* delvbs - (here->BSIM4v7gbgs + here->BSIM4v7ggidlg) * delvgs
- (here->BSIM4v7gbds + here->BSIM4v7ggidld - here->BSIM4v7ggisls) * delvds
- here->BSIM4v7ggislg * delvgd - here->BSIM4v7ggislb* delvbd;
Igstot = here->BSIM4v7Igs + here->BSIM4v7Igcs;
cgshat = Igstot + (here->BSIM4v7gIgsg + here->BSIM4v7gIgcsg) * delvgs
+ here->BSIM4v7gIgcsd * delvds + here->BSIM4v7gIgcsb * delvbs;
Igdtot = here->BSIM4v7Igd + here->BSIM4v7Igcd;
cgdhat = Igdtot + here->BSIM4v7gIgdg * delvgd + here->BSIM4v7gIgcdg * delvgs
+ here->BSIM4v7gIgcdd * delvds + here->BSIM4v7gIgcdb * delvbs;
Igbtot = here->BSIM4v7Igb;
cgbhat = here->BSIM4v7Igb + here->BSIM4v7gIgbg * delvgs + here->BSIM4v7gIgbd
* delvds + here->BSIM4v7gIgbb * delvbs;
}
else
{ Idtot = here->BSIM4v7cd + here->BSIM4v7cbd - here->BSIM4v7Igidl; /* bugfix */
cdhat = Idtot + here->BSIM4v7gbd * delvbd_jct + here->BSIM4v7gmbs
* delvbd + here->BSIM4v7gm * delvgd
- (here->BSIM4v7gds + here->BSIM4v7ggidls) * delvds
- here->BSIM4v7ggidlg * delvgs - here->BSIM4v7ggidlb * delvbs;
Ibtot = here->BSIM4v7cbs + here->BSIM4v7cbd
- here->BSIM4v7Igidl - here->BSIM4v7Igisl - here->BSIM4v7csub;
cbhat = Ibtot + here->BSIM4v7gbs * delvbs_jct + here->BSIM4v7gbd
* delvbd_jct - (here->BSIM4v7gbbs + here->BSIM4v7ggislb) * delvbd
- (here->BSIM4v7gbgs + here->BSIM4v7ggislg) * delvgd
+ (here->BSIM4v7gbds + here->BSIM4v7ggisld - here->BSIM4v7ggidls) * delvds
- here->BSIM4v7ggidlg * delvgs - here->BSIM4v7ggidlb * delvbs;
Igstot = here->BSIM4v7Igs + here->BSIM4v7Igcd;
cgshat = Igstot + here->BSIM4v7gIgsg * delvgs + here->BSIM4v7gIgcdg * delvgd
- here->BSIM4v7gIgcdd * delvds + here->BSIM4v7gIgcdb * delvbd;
Igdtot = here->BSIM4v7Igd + here->BSIM4v7Igcs;
cgdhat = Igdtot + (here->BSIM4v7gIgdg + here->BSIM4v7gIgcsg) * delvgd
- here->BSIM4v7gIgcsd * delvds + here->BSIM4v7gIgcsb * delvbd;
Igbtot = here->BSIM4v7Igb;
cgbhat = here->BSIM4v7Igb + here->BSIM4v7gIgbg * delvgd - here->BSIM4v7gIgbd
* delvds + here->BSIM4v7gIgbb * delvbd;
}
Isestot = here->BSIM4v7gstot * (*(ckt->CKTstate0 + here->BSIM4v7vses));
cseshat = Isestot + here->BSIM4v7gstot * delvses
+ here->BSIM4v7gstotd * delvds + here->BSIM4v7gstotg * delvgs
+ here->BSIM4v7gstotb * delvbs;
Idedtot = here->BSIM4v7gdtot * vdedo;
cdedhat = Idedtot + here->BSIM4v7gdtot * delvded
+ here->BSIM4v7gdtotd * delvds + here->BSIM4v7gdtotg * delvgs
+ here->BSIM4v7gdtotb * delvbs;
#ifndef NOBYPASS
/* Following should be one IF statement, but some C compilers
* can't handle that all at once, so we split it into several
* successive IF's */
if ((!(ckt->CKTmode & MODEINITPRED)) && (ckt->CKTbypass))
if ((fabs(delvds) < (ckt->CKTreltol * MAX(fabs(vds),
fabs(*(ckt->CKTstate0 + here->BSIM4v7vds))) + ckt->CKTvoltTol)))
if ((fabs(delvgs) < (ckt->CKTreltol * MAX(fabs(vgs),
fabs(*(ckt->CKTstate0 + here->BSIM4v7vgs))) + ckt->CKTvoltTol)))
if ((fabs(delvbs) < (ckt->CKTreltol * MAX(fabs(vbs),
fabs(*(ckt->CKTstate0 + here->BSIM4v7vbs))) + ckt->CKTvoltTol)))
if ((fabs(delvbd) < (ckt->CKTreltol * MAX(fabs(vbd),
fabs(*(ckt->CKTstate0 + here->BSIM4v7vbd))) + ckt->CKTvoltTol)))
if ((here->BSIM4v7rgateMod == 0) || (here->BSIM4v7rgateMod == 1)
|| (fabs(delvges) < (ckt->CKTreltol * MAX(fabs(vges),
fabs(*(ckt->CKTstate0 + here->BSIM4v7vges))) + ckt->CKTvoltTol)))
if ((here->BSIM4v7rgateMod != 3) || (fabs(delvgms) < (ckt->CKTreltol
* MAX(fabs(vgms), fabs(*(ckt->CKTstate0 + here->BSIM4v7vgms)))
+ ckt->CKTvoltTol)))
if ((!here->BSIM4v7rbodyMod) || (fabs(delvdbs) < (ckt->CKTreltol
* MAX(fabs(vdbs), fabs(*(ckt->CKTstate0 + here->BSIM4v7vdbs)))
+ ckt->CKTvoltTol)))
if ((!here->BSIM4v7rbodyMod) || (fabs(delvdbd) < (ckt->CKTreltol
* MAX(fabs(vdbd), fabs(*(ckt->CKTstate0 + here->BSIM4v7vdbd)))
+ ckt->CKTvoltTol)))
if ((!here->BSIM4v7rbodyMod) || (fabs(delvsbs) < (ckt->CKTreltol
* MAX(fabs(vsbs), fabs(*(ckt->CKTstate0 + here->BSIM4v7vsbs)))
+ ckt->CKTvoltTol)))
if ((!model->BSIM4v7rdsMod) || (fabs(delvses) < (ckt->CKTreltol
* MAX(fabs(vses), fabs(*(ckt->CKTstate0 + here->BSIM4v7vses)))
+ ckt->CKTvoltTol)))
if ((!model->BSIM4v7rdsMod) || (fabs(delvdes) < (ckt->CKTreltol
* MAX(fabs(vdes), fabs(*(ckt->CKTstate0 + here->BSIM4v7vdes)))
+ ckt->CKTvoltTol)))
if ((fabs(cdhat - Idtot) < ckt->CKTreltol
* MAX(fabs(cdhat), fabs(Idtot)) + ckt->CKTabstol))
if ((fabs(cbhat - Ibtot) < ckt->CKTreltol
* MAX(fabs(cbhat), fabs(Ibtot)) + ckt->CKTabstol))
if ((!model->BSIM4v7igcMod) || ((fabs(cgshat - Igstot) < ckt->CKTreltol
* MAX(fabs(cgshat), fabs(Igstot)) + ckt->CKTabstol)))
if ((!model->BSIM4v7igcMod) || ((fabs(cgdhat - Igdtot) < ckt->CKTreltol
* MAX(fabs(cgdhat), fabs(Igdtot)) + ckt->CKTabstol)))
if ((!model->BSIM4v7igbMod) || ((fabs(cgbhat - Igbtot) < ckt->CKTreltol
* MAX(fabs(cgbhat), fabs(Igbtot)) + ckt->CKTabstol)))
if ((!model->BSIM4v7rdsMod) || ((fabs(cseshat - Isestot) < ckt->CKTreltol
* MAX(fabs(cseshat), fabs(Isestot)) + ckt->CKTabstol)))
if ((!model->BSIM4v7rdsMod) || ((fabs(cdedhat - Idedtot) < ckt->CKTreltol
* MAX(fabs(cdedhat), fabs(Idedtot)) + ckt->CKTabstol)))
{ vds = *(ckt->CKTstate0 + here->BSIM4v7vds);
vgs = *(ckt->CKTstate0 + here->BSIM4v7vgs);
vbs = *(ckt->CKTstate0 + here->BSIM4v7vbs);
vges = *(ckt->CKTstate0 + here->BSIM4v7vges);
vgms = *(ckt->CKTstate0 + here->BSIM4v7vgms);
vbd = *(ckt->CKTstate0 + here->BSIM4v7vbd);
vdbs = *(ckt->CKTstate0 + here->BSIM4v7vdbs);
vdbd = *(ckt->CKTstate0 + here->BSIM4v7vdbd);
vsbs = *(ckt->CKTstate0 + here->BSIM4v7vsbs);
vses = *(ckt->CKTstate0 + here->BSIM4v7vses);
vdes = *(ckt->CKTstate0 + here->BSIM4v7vdes);
vgd = vgs - vds;
vgb = vgs - vbs;
vged = vges - vds;
vgmd = vgms - vds;
vgmb = vgms - vbs;
vbs_jct = (!here->BSIM4v7rbodyMod) ? vbs : vsbs;
vbd_jct = (!here->BSIM4v7rbodyMod) ? vbd : vdbd;
/*** qdef should not be kept fixed even if vgs, vds & vbs has converged
**** qdef = *(ckt->CKTstate0 + here->BSIM4v7qdef);
***/
cdrain = here->BSIM4v7cd;
if ((ckt->CKTmode & (MODETRAN | MODEAC)) ||
((ckt->CKTmode & MODETRANOP) &&
(ckt->CKTmode & MODEUIC)))
{ ByPass = 1;
qgate = here->BSIM4v7qgate;
qbulk = here->BSIM4v7qbulk;
qdrn = here->BSIM4v7qdrn;
cgdo = here->BSIM4v7cgdo;
qgdo = here->BSIM4v7qgdo;
cgso = here->BSIM4v7cgso;
qgso = here->BSIM4v7qgso;
goto line755;
}
else
goto line850;
}
#endif /*NOBYPASS*/
von = here->BSIM4v7von;
if (*(ckt->CKTstate0 + here->BSIM4v7vds) >= 0.0)
{ vgs = DEVfetlim(vgs, *(ckt->CKTstate0 + here->BSIM4v7vgs), von);
vds = vgs - vgd;
vds = DEVlimvds(vds, *(ckt->CKTstate0 + here->BSIM4v7vds));
vgd = vgs - vds;
if (here->BSIM4v7rgateMod == 3)
{ vges = DEVfetlim(vges, *(ckt->CKTstate0 + here->BSIM4v7vges), von);
vgms = DEVfetlim(vgms, *(ckt->CKTstate0 + here->BSIM4v7vgms), von);
vged = vges - vds;
vgmd = vgms - vds;
}
else if ((here->BSIM4v7rgateMod == 1) || (here->BSIM4v7rgateMod == 2))
{ vges = DEVfetlim(vges, *(ckt->CKTstate0 + here->BSIM4v7vges), von);
vged = vges - vds;
}
if (model->BSIM4v7rdsMod)
{ vdes = DEVlimvds(vdes, *(ckt->CKTstate0 + here->BSIM4v7vdes));
vses = -DEVlimvds(-vses, -(*(ckt->CKTstate0 + here->BSIM4v7vses)));
}
}
else
{ vgd = DEVfetlim(vgd, vgdo, von);
vds = vgs - vgd;
vds = -DEVlimvds(-vds, -(*(ckt->CKTstate0 + here->BSIM4v7vds)));
vgs = vgd + vds;
if (here->BSIM4v7rgateMod == 3)
{ vged = DEVfetlim(vged, vgedo, von);
vges = vged + vds;
vgmd = DEVfetlim(vgmd, vgmdo, von);
vgms = vgmd + vds;
}
if ((here->BSIM4v7rgateMod == 1) || (here->BSIM4v7rgateMod == 2))
{ vged = DEVfetlim(vged, vgedo, von);
vges = vged + vds;
}
if (model->BSIM4v7rdsMod)
{ vdes = -DEVlimvds(-vdes, -(*(ckt->CKTstate0 + here->BSIM4v7vdes)));
vses = DEVlimvds(vses, *(ckt->CKTstate0 + here->BSIM4v7vses));
}
}
if (vds >= 0.0)
{ vbs = DEVpnjlim(vbs, *(ckt->CKTstate0 + here->BSIM4v7vbs),
CONSTvt0, model->BSIM4v7vcrit, &Check);
vbd = vbs - vds;
if (here->BSIM4v7rbodyMod)
{ vdbs = DEVpnjlim(vdbs, *(ckt->CKTstate0 + here->BSIM4v7vdbs),
CONSTvt0, model->BSIM4v7vcrit, &Check1);
vdbd = vdbs - vds;
vsbs = DEVpnjlim(vsbs, *(ckt->CKTstate0 + here->BSIM4v7vsbs),
CONSTvt0, model->BSIM4v7vcrit, &Check2);
if ((Check1 == 0) && (Check2 == 0))
Check = 0;
else
Check = 1;
}
}
else
{ vbd = DEVpnjlim(vbd, *(ckt->CKTstate0 + here->BSIM4v7vbd),
CONSTvt0, model->BSIM4v7vcrit, &Check);
vbs = vbd + vds;
if (here->BSIM4v7rbodyMod)
{ vdbd = DEVpnjlim(vdbd, *(ckt->CKTstate0 + here->BSIM4v7vdbd),
CONSTvt0, model->BSIM4v7vcrit, &Check1);
vdbs = vdbd + vds;
vsbdo = *(ckt->CKTstate0 + here->BSIM4v7vsbs)
- *(ckt->CKTstate0 + here->BSIM4v7vds);
vsbd = vsbs - vds;
vsbd = DEVpnjlim(vsbd, vsbdo, CONSTvt0, model->BSIM4v7vcrit, &Check2);
vsbs = vsbd + vds;
if ((Check1 == 0) && (Check2 == 0))
Check = 0;
else
Check = 1;
}
}
}
/* Calculate DC currents and their derivatives */
vbd = vbs - vds;
vgd = vgs - vds;
vgb = vgs - vbs;
vged = vges - vds;
vgmd = vgms - vds;
vgmb = vgms - vbs;
vdbd = vdbs - vds;
vbs_jct = (!here->BSIM4v7rbodyMod) ? vbs : vsbs;
vbd_jct = (!here->BSIM4v7rbodyMod) ? vbd : vdbd;
/* Source/drain junction diode DC model begins */
Nvtms = model->BSIM4v7vtm * model->BSIM4v7SjctEmissionCoeff;
/* if ((here->BSIM4v7Aseff <= 0.0) && (here->BSIM4v7Pseff <= 0.0))
{ SourceSatCurrent = 1.0e-14;
} v4.7 */
if ((here->BSIM4v7Aseff <= 0.0) && (here->BSIM4v7Pseff <= 0.0))
{ SourceSatCurrent = 0.0;
}
else
{ SourceSatCurrent = here->BSIM4v7Aseff * model->BSIM4v7SjctTempSatCurDensity
+ here->BSIM4v7Pseff * model->BSIM4v7SjctSidewallTempSatCurDensity
+ pParam->BSIM4v7weffCJ * here->BSIM4v7nf
* model->BSIM4v7SjctGateSidewallTempSatCurDensity;
}
if (SourceSatCurrent <= 0.0)
{ here->BSIM4v7gbs = ckt->CKTgmin;
here->BSIM4v7cbs = here->BSIM4v7gbs * vbs_jct;
}
else
{ switch(model->BSIM4v7dioMod)
{ case 0:
evbs = exp(vbs_jct / Nvtms);
T1 = model->BSIM4v7xjbvs * exp(-(model->BSIM4v7bvs + vbs_jct) / Nvtms);
/* WDLiu: Magic T1 in this form; different from BSIM4v7 beta. */
here->BSIM4v7gbs = SourceSatCurrent * (evbs + T1) / Nvtms + ckt->CKTgmin;
here->BSIM4v7cbs = SourceSatCurrent * (evbs + here->BSIM4v7XExpBVS
- T1 - 1.0) + ckt->CKTgmin * vbs_jct;
break;
case 1:
T2 = vbs_jct / Nvtms;
if (T2 < -EXP_THRESHOLD)
{ here->BSIM4v7gbs = ckt->CKTgmin;
here->BSIM4v7cbs = SourceSatCurrent * (MIN_EXP - 1.0)
+ ckt->CKTgmin * vbs_jct;
}
else if (vbs_jct <= here->BSIM4v7vjsmFwd)
{ evbs = exp(T2);
here->BSIM4v7gbs = SourceSatCurrent * evbs / Nvtms + ckt->CKTgmin;
here->BSIM4v7cbs = SourceSatCurrent * (evbs - 1.0)
+ ckt->CKTgmin * vbs_jct;
}
else
{ T0 = here->BSIM4v7IVjsmFwd / Nvtms;
here->BSIM4v7gbs = T0 + ckt->CKTgmin;
here->BSIM4v7cbs = here->BSIM4v7IVjsmFwd - SourceSatCurrent + T0
* (vbs_jct - here->BSIM4v7vjsmFwd) + ckt->CKTgmin * vbs_jct;
}
break;
case 2:
if (vbs_jct < here->BSIM4v7vjsmRev)
{ T0 = vbs_jct / Nvtms;
if (T0 < -EXP_THRESHOLD)
{ evbs = MIN_EXP;
devbs_dvb = 0.0;
}
else
{ evbs = exp(T0);
devbs_dvb = evbs / Nvtms;
}
T1 = evbs - 1.0;
T2 = here->BSIM4v7IVjsmRev + here->BSIM4v7SslpRev
* (vbs_jct - here->BSIM4v7vjsmRev);
here->BSIM4v7gbs = devbs_dvb * T2 + T1 * here->BSIM4v7SslpRev + ckt->CKTgmin;
here->BSIM4v7cbs = T1 * T2 + ckt->CKTgmin * vbs_jct;
}
else if (vbs_jct <= here->BSIM4v7vjsmFwd)
{ T0 = vbs_jct / Nvtms;
if (T0 < -EXP_THRESHOLD)
{ evbs = MIN_EXP;
devbs_dvb = 0.0;
}
else
{ evbs = exp(T0);
devbs_dvb = evbs / Nvtms;
}
T1 = (model->BSIM4v7bvs + vbs_jct) / Nvtms;
if (T1 > EXP_THRESHOLD)
{ T2 = MIN_EXP;
T3 = 0.0;
}
else
{ T2 = exp(-T1);
T3 = -T2 /Nvtms;
}
here->BSIM4v7gbs = SourceSatCurrent * (devbs_dvb - model->BSIM4v7xjbvs * T3)
+ ckt->CKTgmin;
here->BSIM4v7cbs = SourceSatCurrent * (evbs + here->BSIM4v7XExpBVS - 1.0
- model->BSIM4v7xjbvs * T2) + ckt->CKTgmin * vbs_jct;
}
else
{ here->BSIM4v7gbs = here->BSIM4v7SslpFwd + ckt->CKTgmin;
here->BSIM4v7cbs = here->BSIM4v7IVjsmFwd + here->BSIM4v7SslpFwd * (vbs_jct
- here->BSIM4v7vjsmFwd) + ckt->CKTgmin * vbs_jct;
}
break;
default: break;
}
}
Nvtmd = model->BSIM4v7vtm * model->BSIM4v7DjctEmissionCoeff;
/* if ((here->BSIM4v7Adeff <= 0.0) && (here->BSIM4v7Pdeff <= 0.0))
{ DrainSatCurrent = 1.0e-14;
} v4.7 */
if ((here->BSIM4v7Adeff <= 0.0) && (here->BSIM4v7Pdeff <= 0.0))
{ DrainSatCurrent = 0.0;
}
else
{ DrainSatCurrent = here->BSIM4v7Adeff * model->BSIM4v7DjctTempSatCurDensity
+ here->BSIM4v7Pdeff * model->BSIM4v7DjctSidewallTempSatCurDensity
+ pParam->BSIM4v7weffCJ * here->BSIM4v7nf
* model->BSIM4v7DjctGateSidewallTempSatCurDensity;
}
if (DrainSatCurrent <= 0.0)
{ here->BSIM4v7gbd = ckt->CKTgmin;
here->BSIM4v7cbd = here->BSIM4v7gbd * vbd_jct;
}
else
{ switch(model->BSIM4v7dioMod)
{ case 0:
evbd = exp(vbd_jct / Nvtmd);
T1 = model->BSIM4v7xjbvd * exp(-(model->BSIM4v7bvd + vbd_jct) / Nvtmd);
/* WDLiu: Magic T1 in this form; different from BSIM4v7 beta. */
here->BSIM4v7gbd = DrainSatCurrent * (evbd + T1) / Nvtmd + ckt->CKTgmin;
here->BSIM4v7cbd = DrainSatCurrent * (evbd + here->BSIM4v7XExpBVD
- T1 - 1.0) + ckt->CKTgmin * vbd_jct;
break;
case 1:
T2 = vbd_jct / Nvtmd;
if (T2 < -EXP_THRESHOLD)
{ here->BSIM4v7gbd = ckt->CKTgmin;
here->BSIM4v7cbd = DrainSatCurrent * (MIN_EXP - 1.0)
+ ckt->CKTgmin * vbd_jct;
}
else if (vbd_jct <= here->BSIM4v7vjdmFwd)
{ evbd = exp(T2);
here->BSIM4v7gbd = DrainSatCurrent * evbd / Nvtmd + ckt->CKTgmin;
here->BSIM4v7cbd = DrainSatCurrent * (evbd - 1.0)
+ ckt->CKTgmin * vbd_jct;
}
else
{ T0 = here->BSIM4v7IVjdmFwd / Nvtmd;
here->BSIM4v7gbd = T0 + ckt->CKTgmin;
here->BSIM4v7cbd = here->BSIM4v7IVjdmFwd - DrainSatCurrent + T0
* (vbd_jct - here->BSIM4v7vjdmFwd) + ckt->CKTgmin * vbd_jct;
}
break;
case 2:
if (vbd_jct < here->BSIM4v7vjdmRev)
{ T0 = vbd_jct / Nvtmd;
if (T0 < -EXP_THRESHOLD)
{ evbd = MIN_EXP;
devbd_dvb = 0.0;
}
else
{ evbd = exp(T0);
devbd_dvb = evbd / Nvtmd;
}
T1 = evbd - 1.0;
T2 = here->BSIM4v7IVjdmRev + here->BSIM4v7DslpRev
* (vbd_jct - here->BSIM4v7vjdmRev);
here->BSIM4v7gbd = devbd_dvb * T2 + T1 * here->BSIM4v7DslpRev + ckt->CKTgmin;
here->BSIM4v7cbd = T1 * T2 + ckt->CKTgmin * vbd_jct;
}
else if (vbd_jct <= here->BSIM4v7vjdmFwd)
{ T0 = vbd_jct / Nvtmd;
if (T0 < -EXP_THRESHOLD)
{ evbd = MIN_EXP;
devbd_dvb = 0.0;
}
else
{ evbd = exp(T0);
devbd_dvb = evbd / Nvtmd;
}
T1 = (model->BSIM4v7bvd + vbd_jct) / Nvtmd;
if (T1 > EXP_THRESHOLD)
{ T2 = MIN_EXP;
T3 = 0.0;
}
else
{ T2 = exp(-T1);
T3 = -T2 /Nvtmd;
}
here->BSIM4v7gbd = DrainSatCurrent * (devbd_dvb - model->BSIM4v7xjbvd * T3)
+ ckt->CKTgmin;
here->BSIM4v7cbd = DrainSatCurrent * (evbd + here->BSIM4v7XExpBVD - 1.0
- model->BSIM4v7xjbvd * T2) + ckt->CKTgmin * vbd_jct;
}
else
{ here->BSIM4v7gbd = here->BSIM4v7DslpFwd + ckt->CKTgmin;
here->BSIM4v7cbd = here->BSIM4v7IVjdmFwd + here->BSIM4v7DslpFwd * (vbd_jct
- here->BSIM4v7vjdmFwd) + ckt->CKTgmin * vbd_jct;
}
break;
default: break;
}
}
          /* trap-assisted tunneling and recombination current for reverse bias */
          /* Effective thermal voltages: vtm0 scaled by the temperature-adjusted
             non-ideality factors for each junction region (bottom, sidewall,
             gate-edge sidewall; source and drain sides separately). */
          Nvtmrssws = model->BSIM4v7vtm0 * model->BSIM4v7njtsswstemp;
          Nvtmrsswgs = model->BSIM4v7vtm0 * model->BSIM4v7njtsswgstemp;
          Nvtmrss = model->BSIM4v7vtm0 * model->BSIM4v7njtsstemp;
          Nvtmrsswd = model->BSIM4v7vtm0 * model->BSIM4v7njtsswdtemp;
          Nvtmrsswgd = model->BSIM4v7vtm0 * model->BSIM4v7njtsswgdtemp;
          Nvtmrsd = model->BSIM4v7vtm0 * model->BSIM4v7njtsdtemp;

          /* Six identical limiting patterns follow (bottom/sidewall/gate-edge x
             source/drain).  When (vts? - vbias) would make the divisor of the
             exact expression nearly zero, the factor vts?/(vts? - vbias) is
             clamped to T9 = 1.0e3; otherwise the exact form is used.
             NOTE(review): DEXP appears to compute T_out = exp(arg) with
             overflow/underflow linearization and T10 = its derivative factor —
             defined elsewhere; confirm against the macro definition. */
          if ((model->BSIM4v7vtss - vbs_jct) < (model->BSIM4v7vtss * 1e-3))
          { T9 = 1.0e3;
            T0 = - vbs_jct / Nvtmrss * T9;
            DEXP(T0, T1, T10);
            dT1_dVb = T10 / Nvtmrss * T9;
          } else {
            T9 = 1.0 / (model->BSIM4v7vtss - vbs_jct);
            T0 = -vbs_jct / Nvtmrss * model->BSIM4v7vtss * T9;
            dT0_dVb = model->BSIM4v7vtss / Nvtmrss * (T9 + vbs_jct * T9 * T9) ;
            DEXP(T0, T1, T10);
            dT1_dVb = T10 * dT0_dVb;
          }
          if ((model->BSIM4v7vtsd - vbd_jct) < (model->BSIM4v7vtsd * 1e-3) )
          { T9 = 1.0e3;
            T0 = -vbd_jct / Nvtmrsd * T9;
            DEXP(T0, T2, T10);
            dT2_dVb = T10 / Nvtmrsd * T9;
          } else {
            T9 = 1.0 / (model->BSIM4v7vtsd - vbd_jct);
            T0 = -vbd_jct / Nvtmrsd * model->BSIM4v7vtsd * T9;
            dT0_dVb = model->BSIM4v7vtsd / Nvtmrsd * (T9 + vbd_jct * T9 * T9) ;
            DEXP(T0, T2, T10);
            dT2_dVb = T10 * dT0_dVb;
          }
          if ((model->BSIM4v7vtssws - vbs_jct) < (model->BSIM4v7vtssws * 1e-3) )
          { T9 = 1.0e3;
            T0 = -vbs_jct / Nvtmrssws * T9;
            DEXP(T0, T3, T10);
            dT3_dVb = T10 / Nvtmrssws * T9;
          } else {
            T9 = 1.0 / (model->BSIM4v7vtssws - vbs_jct);
            T0 = -vbs_jct / Nvtmrssws * model->BSIM4v7vtssws * T9;
            dT0_dVb = model->BSIM4v7vtssws / Nvtmrssws * (T9 + vbs_jct * T9 * T9) ;
            DEXP(T0, T3, T10);
            dT3_dVb = T10 * dT0_dVb;
          }
          if ((model->BSIM4v7vtsswd - vbd_jct) < (model->BSIM4v7vtsswd * 1e-3) )
          { T9 = 1.0e3;
            T0 = -vbd_jct / Nvtmrsswd * T9;
            DEXP(T0, T4, T10);
            dT4_dVb = T10 / Nvtmrsswd * T9;
          } else {
            T9 = 1.0 / (model->BSIM4v7vtsswd - vbd_jct);
            T0 = -vbd_jct / Nvtmrsswd * model->BSIM4v7vtsswd * T9;
            dT0_dVb = model->BSIM4v7vtsswd / Nvtmrsswd * (T9 + vbd_jct * T9 * T9) ;
            DEXP(T0, T4, T10);
            dT4_dVb = T10 * dT0_dVb;
          }
          if ((model->BSIM4v7vtsswgs - vbs_jct) < (model->BSIM4v7vtsswgs * 1e-3) )
          { T9 = 1.0e3;
            T0 = -vbs_jct / Nvtmrsswgs * T9;
            DEXP(T0, T5, T10);
            dT5_dVb = T10 / Nvtmrsswgs * T9;
          } else {
            T9 = 1.0 / (model->BSIM4v7vtsswgs - vbs_jct);
            T0 = -vbs_jct / Nvtmrsswgs * model->BSIM4v7vtsswgs * T9;
            dT0_dVb = model->BSIM4v7vtsswgs / Nvtmrsswgs * (T9 + vbs_jct * T9 * T9) ;
            DEXP(T0, T5, T10);
            dT5_dVb = T10 * dT0_dVb;
          }
          if ((model->BSIM4v7vtsswgd - vbd_jct) < (model->BSIM4v7vtsswgd * 1e-3) )
          { T9 = 1.0e3;
            T0 = -vbd_jct / Nvtmrsswgd * T9;
            DEXP(T0, T6, T10);
            dT6_dVb = T10 / Nvtmrsswgd * T9;
          } else {
            T9 = 1.0 / (model->BSIM4v7vtsswgd - vbd_jct);
            T0 = -vbd_jct / Nvtmrsswgd * model->BSIM4v7vtsswgd * T9;
            dT0_dVb = model->BSIM4v7vtsswgd / Nvtmrsswgd * (T9 + vbd_jct * T9 * T9) ;
            DEXP(T0, T6, T10);
            dT6_dVb = T10 * dT0_dVb;
          }
          /* Accumulate the three source-side (T1,T3,T5) and drain-side
             (T2,T4,T6) tunneling components into the junction conductances
             and currents; the (T - 1.0) form subtracts the equilibrium term. */
          here->BSIM4v7gbs += here->BSIM4v7SjctTempRevSatCur * dT1_dVb
                            + here->BSIM4v7SswTempRevSatCur * dT3_dVb
                            + here->BSIM4v7SswgTempRevSatCur * dT5_dVb;
          here->BSIM4v7cbs -= here->BSIM4v7SjctTempRevSatCur * (T1 - 1.0)
                            + here->BSIM4v7SswTempRevSatCur * (T3 - 1.0)
                            + here->BSIM4v7SswgTempRevSatCur * (T5 - 1.0);
          here->BSIM4v7gbd += here->BSIM4v7DjctTempRevSatCur * dT2_dVb
                            + here->BSIM4v7DswTempRevSatCur * dT4_dVb
                            + here->BSIM4v7DswgTempRevSatCur * dT6_dVb;
          here->BSIM4v7cbd -= here->BSIM4v7DjctTempRevSatCur * (T2 - 1.0)
                            + here->BSIM4v7DswTempRevSatCur * (T4 - 1.0)
                            + here->BSIM4v7DswgTempRevSatCur * (T6 - 1.0);
          /* End of diode DC model */

          /* Operating-mode selection: normal (mode=+1) for vds >= 0, otherwise
             source and drain are swapped (mode=-1) and all terminal voltages
             are remapped so the core model always sees Vds >= 0. */
          if (vds >= 0.0)
          {   here->BSIM4v7mode = 1;
              Vds = vds;
              Vgs = vgs;
              Vbs = vbs;
              Vdb = vds - vbs;  /* WDLiu: for GIDL */
          }
          else
          {   here->BSIM4v7mode = -1;
              Vds = -vds;
              Vgs = vgd;
              Vbs = vbd;
              Vdb = -vbs;
          }

          /* dunga */
          /* Gate-stack selection: with mtrlMod the model works in terms of an
             equivalent oxide thickness (eot) at epsrox = 3.9 and a
             user-specified substrate permittivity; otherwise the physical
             toxe/epsrox and silicon permittivity are used. */
          if(model->BSIM4v7mtrlMod)
          {
              epsrox = 3.9;
              toxe = model->BSIM4v7eot;
              epssub = EPS0 * model->BSIM4v7epsrsub;
          }
          else
          {
              epsrox = model->BSIM4v7epsrox;
              toxe = model->BSIM4v7toxe;
              epssub = EPSSI;
          }

          /* Smoothing of Vbs toward its lower clamp vbsc: quadratic smoothing
             function keeps Vbseff and its derivative continuous at the clamp. */
          T0 = Vbs - here->BSIM4v7vbsc - 0.001;
          T1 = sqrt(T0 * T0 - 0.004 * here->BSIM4v7vbsc);
          if (T0 >= 0.0)
          {   Vbseff = here->BSIM4v7vbsc + 0.5 * (T0 + T1);
              dVbseff_dVb = 0.5 * (1.0 + T0 / T1);
          }
          else
          {   T2 = -0.002 / (T1 - T0);
              Vbseff = here->BSIM4v7vbsc * (1.0 + T2);
              dVbseff_dVb = T2 * here->BSIM4v7vbsc / T1;
          }

          /* JX: Correction to forward body bias */
          /* Limit Vbseff below 0.95*phi with the same smoothing scheme so the
             surface potential Phis = phi - Vbseff stays positive. */
          T9 = 0.95 * pParam->BSIM4v7phi;
          T0 = T9 - Vbseff - 0.001;
          T1 = sqrt(T0 * T0 + 0.004 * T9);
          Vbseff = T9 - 0.5 * (T0 + T1);
          dVbseff_dVb *= 0.5 * (1.0 + T0 / T1);

          Phis = pParam->BSIM4v7phi - Vbseff;
          dPhis_dVb = -1.0;
          sqrtPhis = sqrt(Phis);
          dsqrtPhis_dVb = -0.5 / sqrtPhis;

          /* Depletion width scales with sqrt(Phis) relative to Xdep0 at phi. */
          Xdep = pParam->BSIM4v7Xdep0 * sqrtPhis / pParam->BSIM4v7sqrtPhi;
          dXdep_dVb = (pParam->BSIM4v7Xdep0 / pParam->BSIM4v7sqrtPhi)
                    * dsqrtPhis_dVb;

          Leff = pParam->BSIM4v7leff;
          Vtm = model->BSIM4v7vtm;
          Vtm0 = model->BSIM4v7vtm0;
          /* Vth Calculation */
          /* Threshold voltage with short-channel (Delt_vth), narrow-width
             (T2 and Vth_NarrowW terms), body-effect, temperature (T1) and
             DIBL (DIBL_Sft) corrections, plus the sensitivities dVth_dVb
             and dVth_dVd used throughout the rest of the model. */
          T3 = sqrt(Xdep);
          V0 = pParam->BSIM4v7vbi - pParam->BSIM4v7phi;

          /* Characteristic length lt1 = factor1*sqrt(Xdep)*(1 + dvt2*Vbseff),
             with the (1 + dvt2*Vbseff) factor linearized below -0.5 to keep
             it positive and smooth. */
          T0 = pParam->BSIM4v7dvt2 * Vbseff;
          if (T0 >= - 0.5)
          {   T1 = 1.0 + T0;
              T2 = pParam->BSIM4v7dvt2;
          }
          else
          {   T4 = 1.0 / (3.0 + 8.0 * T0);
              T1 = (1.0 + 3.0 * T0) * T4;
              T2 = pParam->BSIM4v7dvt2 * T4 * T4;
          }
          lt1 = model->BSIM4v7factor1 * T3 * T1;
          dlt1_dVb = model->BSIM4v7factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2);

          /* Same construction for the narrow-width characteristic length ltw. */
          T0 = pParam->BSIM4v7dvt2w * Vbseff;
          if (T0 >= - 0.5)
          {   T1 = 1.0 + T0;
              T2 = pParam->BSIM4v7dvt2w;
          }
          else
          {   T4 = 1.0 / (3.0 + 8.0 * T0);
              T1 = (1.0 + 3.0 * T0) * T4;
              T2 = pParam->BSIM4v7dvt2w * T4 * T4;
          }
          ltw = model->BSIM4v7factor1 * T3 * T1;
          dltw_dVb = model->BSIM4v7factor1 * (0.5 / T3 * T1 * dXdep_dVb + T3 * T2);

          /* Short-channel effect factor Theta0 = exp(x)/(exp(x)-1)^2-ish form,
             clamped for large arguments. */
          T0 = pParam->BSIM4v7dvt1 * Leff / lt1;
          if (T0 < EXP_THRESHOLD)
          {   T1 = exp(T0);
              T2 = T1 - 1.0;
              T3 = T2 * T2;
              T4 = T3 + 2.0 * T1 * MIN_EXP;
              Theta0 = T1 / T4;
              dT1_dVb = -T0 * T1 * dlt1_dVb / lt1;
              dTheta0_dVb = dT1_dVb * (T4 - 2.0 * T1 * (T2 + MIN_EXP)) / T4 / T4;
          }
          else
          {   Theta0 = 1.0 / (MAX_EXP - 2.0); /* 3.0 * MIN_EXP omitted */
              dTheta0_dVb = 0.0;
          }
          here->BSIM4v7thetavth = pParam->BSIM4v7dvt0 * Theta0;
          Delt_vth = here->BSIM4v7thetavth * V0;
          dDelt_vth_dVb = pParam->BSIM4v7dvt0 * dTheta0_dVb * V0;

          /* Same functional form for the narrow-width SCE contribution (T2). */
          T0 = pParam->BSIM4v7dvt1w * pParam->BSIM4v7weff * Leff / ltw;
          if (T0 < EXP_THRESHOLD)
          {   T1 = exp(T0);
              T2 = T1 - 1.0;
              T3 = T2 * T2;
              T4 = T3 + 2.0 * T1 * MIN_EXP;
              T5 = T1 / T4;
              dT1_dVb = -T0 * T1 * dltw_dVb / ltw;
              dT5_dVb = dT1_dVb * (T4 - 2.0 * T1 * (T2 + MIN_EXP)) / T4 / T4;
          }
          else
          {   T5 = 1.0 / (MAX_EXP - 2.0); /* 3.0 * MIN_EXP omitted */
              dT5_dVb = 0.0;
          }
          T0 = pParam->BSIM4v7dvt0w * T5;
          T2 = T0 * V0;
          dT2_dVb = pParam->BSIM4v7dvt0w * dT5_dVb * V0;

          /* Temperature shift T1 (kt1/kt1l/kt2 terms) and lateral
             non-uniform doping correction. */
          TempRatio =  ckt->CKTtemp / model->BSIM4v7tnom - 1.0;
          T0 = sqrt(1.0 + pParam->BSIM4v7lpe0 / Leff);
          T1 = pParam->BSIM4v7k1ox * (T0 - 1.0) * pParam->BSIM4v7sqrtPhi
             + (pParam->BSIM4v7kt1 + pParam->BSIM4v7kt1l / Leff
             + pParam->BSIM4v7kt2 * Vbseff) * TempRatio;
          Vth_NarrowW = toxe * pParam->BSIM4v7phi
                      / (pParam->BSIM4v7weff + pParam->BSIM4v7w0);

          /* DIBL shift, with eta0 + etab*Vbseff linearized below 1e-4. */
          T3 = here->BSIM4v7eta0 + pParam->BSIM4v7etab * Vbseff;
          if (T3 < 1.0e-4)
          {   T9 = 1.0 / (3.0 - 2.0e4 * T3);
              T3 = (2.0e-4 - T3) * T9;
              T4 = T9 * T9;
          }
          else
          {   T4 = 1.0;
          }
          dDIBL_Sft_dVd = T3 * pParam->BSIM4v7theta0vb0;
          DIBL_Sft = dDIBL_Sft_dVd * Vds;

          Lpe_Vb = sqrt(1.0 + pParam->BSIM4v7lpeb / Leff);

          Vth = model->BSIM4v7type * here->BSIM4v7vth0 + (pParam->BSIM4v7k1ox * sqrtPhis
              - pParam->BSIM4v7k1 * pParam->BSIM4v7sqrtPhi) * Lpe_Vb
              - here->BSIM4v7k2ox * Vbseff - Delt_vth - T2 + (pParam->BSIM4v7k3
              + pParam->BSIM4v7k3b * Vbseff) * Vth_NarrowW + T1 - DIBL_Sft;

          dVth_dVb = Lpe_Vb * pParam->BSIM4v7k1ox * dsqrtPhis_dVb - here->BSIM4v7k2ox
                   - dDelt_vth_dVb - dT2_dVb + pParam->BSIM4v7k3b * Vth_NarrowW
                   - pParam->BSIM4v7etab * Vds * pParam->BSIM4v7theta0vb0 * T4
                   + pParam->BSIM4v7kt2 * TempRatio;
          dVth_dVd = -dDIBL_Sft_dVd;
          /* Calculate n */
          /* Subthreshold swing factor n = 1 + (Cdep + Cdsc*Theta0 + Cit)/Coxe,
             linearized below -0.5 to keep n positive; dn_dVb/dn_dVd are the
             bias sensitivities used later in Vgsteff. */
          tmp1 = epssub / Xdep;
          here->BSIM4v7nstar = model->BSIM4v7vtm / Charge_q * (model->BSIM4v7coxe
                             + tmp1 + pParam->BSIM4v7cit);
          tmp2 = pParam->BSIM4v7nfactor * tmp1;
          tmp3 = pParam->BSIM4v7cdsc + pParam->BSIM4v7cdscb * Vbseff
               + pParam->BSIM4v7cdscd * Vds;
          tmp4 = (tmp2 + tmp3 * Theta0 + pParam->BSIM4v7cit) / model->BSIM4v7coxe;
          if (tmp4 >= -0.5)
          {   n = 1.0 + tmp4;
              dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb
                     + pParam->BSIM4v7cdscb * Theta0) / model->BSIM4v7coxe;
              dn_dVd = pParam->BSIM4v7cdscd * Theta0 / model->BSIM4v7coxe;
          }
          else
          {   T0 = 1.0 / (3.0 + 8.0 * tmp4);
              n = (1.0 + 3.0 * tmp4) * T0;
              T0 *= T0;
              dn_dVb = (-tmp2 / Xdep * dXdep_dVb + tmp3 * dTheta0_dVb
                     + pParam->BSIM4v7cdscb * Theta0) / model->BSIM4v7coxe * T0;
              dn_dVd = pParam->BSIM4v7cdscd * Theta0 / model->BSIM4v7coxe * T0;
          }

          /* Vth correction for Pocket implant */
          /* DITS shift: Vth -= n * V * ln(Leff / (Leff + dvtp0*(1+exp(-dvtp1*Vds)))),
             where V is Vtm (tempMod < 2) or Vtm0 (tempMod >= 2). */
          if (pParam->BSIM4v7dvtp0 > 0.0)
          {   T0 = -pParam->BSIM4v7dvtp1 * Vds;
              if (T0 < -EXP_THRESHOLD)
              {   T2 = MIN_EXP;
                  dT2_dVd = 0.0;
              }
              else
              {   T2 = exp(T0);
                  dT2_dVd = -pParam->BSIM4v7dvtp1 * T2;
              }
              T3 = Leff + pParam->BSIM4v7dvtp0 * (1.0 + T2);
              dT3_dVd = pParam->BSIM4v7dvtp0 * dT2_dVd;
              if (model->BSIM4v7tempMod < 2)
              {
                  T4 = Vtm * log(Leff / T3);
                  dT4_dVd = -Vtm * dT3_dVd / T3;
              }
              else
              {
                  T4 = model->BSIM4v7vtm0 * log(Leff / T3);
                  dT4_dVd = -model->BSIM4v7vtm0 * dT3_dVd / T3;
              }
              dDITS_Sft_dVd = dn_dVd * T4 + n * dT4_dVd;
              dDITS_Sft_dVb = T4 * dn_dVb;

              Vth -= n * T4;
              dVth_dVd -= dDITS_Sft_dVd;
              dVth_dVb -= dDITS_Sft_dVb;
          }

          /* v4.7 DITS_SFT2  */
          /* Second DITS term: dvtp2factor * tanh(dvtp4 * Vds), written via the
             DEXP-limited exponential T0 = exp(2*dvtp4*Vds). */
          if ((pParam->BSIM4v7dvtp4  == 0.0) || (pParam->BSIM4v7dvtp2factor == 0.0)) {
              T0 = 0.0;
              DITS_Sft2 = 0.0;
          }
          else
          {
              //T0 = exp(2.0 * pParam->BSIM4v7dvtp4 * Vds);   /* beta code */
              T1 = 2.0 * pParam->BSIM4v7dvtp4 * Vds;
              DEXP(T1, T0, T10);
              DITS_Sft2 = pParam->BSIM4v7dvtp2factor * (T0-1) / (T0+1);
              //dDITS_Sft2_dVd = pParam->BSIM4v7dvtp2factor * pParam->BSIM4v7dvtp4 * 4.0 * T0 / ((T0+1) * (T0+1));   /* beta code */
              dDITS_Sft2_dVd = pParam->BSIM4v7dvtp2factor * pParam->BSIM4v7dvtp4 * 4.0 * T10 / ((T0+1) * (T0+1));
              Vth -= DITS_Sft2;
              dVth_dVd -= dDITS_Sft2_dVd;
          }
          here->BSIM4v7von = Vth;

          /* Poly Gate Si Depletion Effect */
          /* Correct vgs/vgd for gate depletion; the mode-dependent one becomes
             Vgs_eff for the core model, but both corrected values and their
             derivatives are stored for later (charge/capacitance) use. */
          T0 = here->BSIM4v7vfb + pParam->BSIM4v7phi;
          if(model->BSIM4v7mtrlMod == 0)
              T1 = EPSSI;
          else
              T1 = model->BSIM4v7epsrgate * EPS0;

          BSIM4v7polyDepletion(T0, pParam->BSIM4v7ngate, T1, model->BSIM4v7coxe, vgs, &vgs_eff, &dvgs_eff_dvg);
          BSIM4v7polyDepletion(T0, pParam->BSIM4v7ngate, T1, model->BSIM4v7coxe, vgd, &vgd_eff, &dvgd_eff_dvg);

          if(here->BSIM4v7mode>0) {
              Vgs_eff = vgs_eff;
              dVgs_eff_dVg = dvgs_eff_dvg;
          } else {
              Vgs_eff = vgd_eff;
              dVgs_eff_dVg = dvgd_eff_dvg;
          }
          here->BSIM4v7vgs_eff = vgs_eff;
          here->BSIM4v7vgd_eff = vgd_eff;
          here->BSIM4v7dvgs_eff_dvg = dvgs_eff_dvg;
          here->BSIM4v7dvgd_eff_dvg = dvgd_eff_dvg;

          Vgst = Vgs_eff - Vth;

          /* Calculate Vgsteff */
          /* Unified effective gate overdrive Vgsteff = T10 / T9, a smooth
             interpolation between the subthreshold (exponential) and
             strong-inversion (linear) regimes, with its Vg/Vd/Vb derivatives.
             Numerator T10 = n*Vtm*ln(1 + exp(mstar*Vgst/(n*Vtm))), clamped
             at both exponential limits. */
          T0 = n * Vtm;
          T1 = pParam->BSIM4v7mstar * Vgst;
          T2 = T1 / T0;
          if (T2 > EXP_THRESHOLD)
          {   T10 = T1;
              dT10_dVg = pParam->BSIM4v7mstar * dVgs_eff_dVg;
              dT10_dVd = -dVth_dVd * pParam->BSIM4v7mstar;
              dT10_dVb = -dVth_dVb * pParam->BSIM4v7mstar;
          }
          else if (T2 < -EXP_THRESHOLD)
          {   T10 = Vtm * log(1.0 + MIN_EXP);
              dT10_dVg = 0.0;
              dT10_dVd = T10 * dn_dVd;
              dT10_dVb = T10 * dn_dVb;
              T10 *= n;
          }
          else
          {   ExpVgst = exp(T2);
              T3 = Vtm * log(1.0 + ExpVgst);
              T10 = n * T3;
              dT10_dVg = pParam->BSIM4v7mstar * ExpVgst / (1.0 + ExpVgst);
              dT10_dVb = T3 * dn_dVb - dT10_dVg * (dVth_dVb + Vgst * dn_dVb / n);
              dT10_dVd = T3 * dn_dVd - dT10_dVg * (dVth_dVd + Vgst * dn_dVd / n);
              dT10_dVg *= dVgs_eff_dVg;
          }

          /* Denominator T9 = mstar + n*(Coxe/Cdep0)*exp((voffcbn -
             (1-mstar)*Vgst)/(n*Vtm)), again clamped at both limits. */
          T1 = pParam->BSIM4v7voffcbn - (1.0 - pParam->BSIM4v7mstar) * Vgst;
          T2 = T1 / T0;
          if (T2 < -EXP_THRESHOLD)
          {   T3 = model->BSIM4v7coxe * MIN_EXP / pParam->BSIM4v7cdep0;
              T9 = pParam->BSIM4v7mstar + T3 * n;
              dT9_dVg = 0.0;
              dT9_dVd = dn_dVd * T3;
              dT9_dVb = dn_dVb * T3;
          }
          else if (T2 > EXP_THRESHOLD)
          {   T3 = model->BSIM4v7coxe * MAX_EXP / pParam->BSIM4v7cdep0;
              T9 = pParam->BSIM4v7mstar + T3 * n;
              dT9_dVg = 0.0;
              dT9_dVd = dn_dVd * T3;
              dT9_dVb = dn_dVb * T3;
          }
          else
          {   ExpVgst = exp(T2);
              T3 = model->BSIM4v7coxe / pParam->BSIM4v7cdep0;
              T4 = T3 * ExpVgst;
              T5 = T1 * T4 / T0;
              T9 = pParam->BSIM4v7mstar + n * T4;
              dT9_dVg = T3 * (pParam->BSIM4v7mstar - 1.0) * ExpVgst / Vtm;
              dT9_dVb = T4 * dn_dVb - dT9_dVg * dVth_dVb - T5 * dn_dVb;
              dT9_dVd = T4 * dn_dVd - dT9_dVg * dVth_dVd - T5 * dn_dVd;
              dT9_dVg *= dVgs_eff_dVg;
          }
          /* Vgsteff and its derivatives via quotient rule. */
          here->BSIM4v7Vgsteff = Vgsteff = T10 / T9;
          T11 = T9 * T9;
          dVgsteff_dVg = (T9 * dT10_dVg - T10 * dT9_dVg) / T11;
          dVgsteff_dVd = (T9 * dT10_dVd - T10 * dT9_dVd) / T11;
          dVgsteff_dVb = (T9 * dT10_dVb - T10 * dT9_dVb) / T11;

          /* Calculate Effective Channel Geometry */
          /* Bias-dependent effective width; clamped smoothly near 2e-8 m. */
          T9 = sqrtPhis - pParam->BSIM4v7sqrtPhi;
          Weff = pParam->BSIM4v7weff - 2.0 * (pParam->BSIM4v7dwg * Vgsteff
               + pParam->BSIM4v7dwb * T9);
          dWeff_dVg = -2.0 * pParam->BSIM4v7dwg;
          dWeff_dVb = -2.0 * pParam->BSIM4v7dwb * dsqrtPhis_dVb;

          if (Weff < 2.0e-8) /* to avoid the discontinuity problem due to Weff*/
          {   T0 = 1.0 / (6.0e-8 - 2.0 * Weff);
              Weff = 2.0e-8 * (4.0e-8 - Weff) * T0;
              T0 *= T0 * 4.0e-16;
              dWeff_dVg *= T0;
              dWeff_dVb *= T0;
          }

          /* Bias-dependent internal S/D series resistance (only when the
             resistance is lumped into the I-V, i.e. rdsMod != 1). */
          if (model->BSIM4v7rdsMod == 1)
              Rds = dRds_dVg = dRds_dVb = 0.0;
          else
          {   T0 = 1.0 + pParam->BSIM4v7prwg * Vgsteff;
              dT0_dVg = -pParam->BSIM4v7prwg / T0 / T0;
              T1 = pParam->BSIM4v7prwb * T9;
              dT1_dVb = pParam->BSIM4v7prwb * dsqrtPhis_dVb;

              T2 = 1.0 / T0 + T1;
              T3 = T2 + sqrt(T2 * T2 + 0.01); /* 0.01 = 4.0 * 0.05 * 0.05 */
              dT3_dVg = 1.0 + T2 / (T3 - T2);
              dT3_dVb = dT3_dVg * dT1_dVb;
              dT3_dVg *= dT0_dVg;

              T4 = pParam->BSIM4v7rds0 * 0.5;
              Rds = pParam->BSIM4v7rdswmin + T3 * T4;
              dRds_dVg = T4 * dT3_dVg;
              dRds_dVb = T4 * dT3_dVb;

              if (Rds > 0.0)
                  here->BSIM4v7grdsw = 1.0 / Rds* here->BSIM4v7nf; /*4.6.2*/
              else
                  here->BSIM4v7grdsw = 0.0;
          }

          /* Calculate Abulk */
          /* Bulk-charge factor Abulk = (1 + T1*T2)*f(Vgsteff), with smooth
             lower clamps at 0.1 and a keta-based body-bias factor T0. */
          T9 = 0.5 * pParam->BSIM4v7k1ox * Lpe_Vb / sqrtPhis;
          T1 = T9 + here->BSIM4v7k2ox - pParam->BSIM4v7k3b * Vth_NarrowW;
          dT1_dVb = -T9 / sqrtPhis * dsqrtPhis_dVb;

          T9 = sqrt(pParam->BSIM4v7xj * Xdep);
          tmp1 = Leff + 2.0 * T9;
          T5 = Leff / tmp1;
          tmp2 = pParam->BSIM4v7a0 * T5;
          tmp3 = pParam->BSIM4v7weff + pParam->BSIM4v7b1;
          tmp4 = pParam->BSIM4v7b0 / tmp3;
          T2 = tmp2 + tmp4;
          dT2_dVb = -T9 / tmp1 / Xdep * dXdep_dVb;
          T6 = T5 * T5;
          T7 = T5 * T6;

          Abulk0 = 1.0 + T1 * T2;
          dAbulk0_dVb = T1 * tmp2 * dT2_dVb + T2 * dT1_dVb;

          T8 = pParam->BSIM4v7ags * pParam->BSIM4v7a0 * T7;
          dAbulk_dVg = -T1 * T8;
          Abulk = Abulk0 + dAbulk_dVg * Vgsteff;
          dAbulk_dVb = dAbulk0_dVb - T8 * Vgsteff * (dT1_dVb
                     + 3.0 * T1 * dT2_dVb);

          if (Abulk0 < 0.1) /* added to avoid the problems caused by Abulk0 */
          {   T9 = 1.0 / (3.0 - 20.0 * Abulk0);
              Abulk0 = (0.2 - Abulk0) * T9;
              dAbulk0_dVb *= T9 * T9;
          }

          if (Abulk < 0.1)
          {   T9 = 1.0 / (3.0 - 20.0 * Abulk);
              Abulk = (0.2 - Abulk) * T9;
              T10 = T9 * T9;
              dAbulk_dVb *= T10;
              dAbulk_dVg *= T10;
          }
          here->BSIM4v7Abulk = Abulk;

          T2 = pParam->BSIM4v7keta * Vbseff;
          if (T2 >= -0.9)
          {   T0 = 1.0 / (1.0 + T2);
              dT0_dVb = -pParam->BSIM4v7keta * T0 * T0;
          }
          else
          {   T1 = 1.0 / (0.8 + T2);
              T0 = (17.0 + 20.0 * T2) * T1;
              dT0_dVb = -pParam->BSIM4v7keta * T1 * T1;
          }
          dAbulk_dVg *= T0;
          dAbulk_dVb = dAbulk_dVb * T0 + Abulk * dT0_dVb;
          dAbulk0_dVb = dAbulk0_dVb * T0 + Abulk0 * dT0_dVb;
          Abulk *= T0;
          Abulk0 *= T0;
          /* Mobility calculation */
          /* Effective mobility ueff = u0temp / Denomi, where Denomi = 1 + T5
             is the vertical-field degradation term chosen by mobMod
             (0, 1, 2, or the high-k model).  T14 is a work-function offset
             used only when mtrlMod is on and mtrlCompatMod is off. */
          if (model->BSIM4v7mtrlMod && model->BSIM4v7mtrlCompatMod == 0)
              T14 = 2.0 * model->BSIM4v7type *(model->BSIM4v7phig - model->BSIM4v7easub - 0.5*model->BSIM4v7Eg0 + 0.45);
          else
              T14 = 0.0;

          if (model->BSIM4v7mobMod == 0)
          {   T0 = Vgsteff + Vth + Vth - T14;
              T2 = pParam->BSIM4v7ua + pParam->BSIM4v7uc * Vbseff;
              T3 = T0 / toxe;
              T12 = sqrt(Vth * Vth + 0.0001);  /* smoothed |Vth| */
              T9 = 1.0/(Vgsteff + 2*T12);
              T10 = T9*toxe;
              T8 = pParam->BSIM4v7ud * T10 * T10 * Vth;
              T6 = T8 * Vth;  /* Coulomb-scattering (ud) term */
              T5 = T3 * (T2 + pParam->BSIM4v7ub * T3) + T6;
              T7 = - 2.0 * T6 * T9;
              T11 = T7 * Vth/T12;
              dDenomi_dVg = (T2 + 2.0 * pParam->BSIM4v7ub * T3) / toxe;
              T13 = 2.0 * (dDenomi_dVg + T11 + T8);
              dDenomi_dVd = T13 * dVth_dVd;
              dDenomi_dVb = T13 * dVth_dVb + pParam->BSIM4v7uc * T3;
              dDenomi_dVg+= T7;
          }
          else if (model->BSIM4v7mobMod == 1)
          {   T0 = Vgsteff + Vth + Vth - T14;
              T2 = 1.0 + pParam->BSIM4v7uc * Vbseff;
              T3 = T0 / toxe;
              T4 = T3 * (pParam->BSIM4v7ua + pParam->BSIM4v7ub * T3);
              T12 = sqrt(Vth * Vth + 0.0001);
              T9 = 1.0/(Vgsteff + 2*T12);
              T10 = T9*toxe;
              T8 = pParam->BSIM4v7ud * T10 * T10 * Vth;
              T6 = T8 * Vth;
              T5 = T4 * T2 + T6;
              T7 = - 2.0 * T6 * T9;
              T11 = T7 * Vth/T12;
              dDenomi_dVg = (pParam->BSIM4v7ua + 2.0 * pParam->BSIM4v7ub * T3) * T2 / toxe;
              T13 = 2.0 * (dDenomi_dVg + T11 + T8);
              dDenomi_dVd = T13 * dVth_dVd;
              dDenomi_dVb = T13 * dVth_dVb + pParam->BSIM4v7uc * T4;
              dDenomi_dVg+= T7;
          }
          else if (model->BSIM4v7mobMod == 2)
          {   T0 = (Vgsteff + here->BSIM4v7vtfbphi1) / toxe;
              T1 = exp(pParam->BSIM4v7eu * log(T0));  /* T0^eu */
              dT1_dVg = T1 * pParam->BSIM4v7eu / T0 / toxe;
              T2 = pParam->BSIM4v7ua + pParam->BSIM4v7uc * Vbseff;
              T3 = T0 / toxe; /*Do we need it?*/
              T12 = sqrt(Vth * Vth + 0.0001);
              T9 = 1.0/(Vgsteff + 2*T12);
              T10 = T9*toxe;
              T8 = pParam->BSIM4v7ud * T10 * T10 * Vth;
              T6 = T8 * Vth;
              T5 = T1 * T2 + T6;
              T7 = - 2.0 * T6 * T9;
              T11 = T7 * Vth/T12;
              dDenomi_dVg = T2 * dT1_dVg + T7;
              T13 = 2.0 * (T11 + T8);
              dDenomi_dVd = T13 * dVth_dVd;
              dDenomi_dVb = T13 * dVth_dVb + T1 * pParam->BSIM4v7uc;
          }
          /*high K mobility*/
          else
          {
             /*univsersal mobility*/
              T0 = (Vgsteff + here->BSIM4v7vtfbphi1)* 1.0e-8 / toxe/6.0;
              T1 = exp(pParam->BSIM4v7eu * log(T0));
              dT1_dVg = T1 * pParam->BSIM4v7eu * 1.0e-8/ T0 / toxe/6.0;
              T2 = pParam->BSIM4v7ua + pParam->BSIM4v7uc * Vbseff;

              /*Coulombic*/
              VgsteffVth = pParam->BSIM4v7VgsteffVth;
              T10 = exp(pParam->BSIM4v7ucs * log(0.5 + 0.5 * Vgsteff/VgsteffVth));
              T11 =  pParam->BSIM4v7ud/T10;
              dT11_dVg = - 0.5 * pParam->BSIM4v7ucs * T11 /(0.5 + 0.5*Vgsteff/VgsteffVth)/VgsteffVth;

              dDenomi_dVg = T2 * dT1_dVg + dT11_dVg;
              dDenomi_dVd = 0.0;
              dDenomi_dVb = T1 * pParam->BSIM4v7uc;
              T5 = T1 * T2 + T11;
          }

          /* Smooth lower clamp on Denomi = 1 + T5 near -0.8. */
          if (T5 >= -0.8)
          {   Denomi = 1.0 + T5;
          }
          else
          {   T9 = 1.0 / (7.0 + 10.0 * T5);
              Denomi = (0.6 + T5) * T9;
              T9 *= T9;
              dDenomi_dVg *= T9;
              dDenomi_dVd *= T9;
              dDenomi_dVb *= T9;
          }

          here->BSIM4v7ueff = ueff = here->BSIM4v7u0temp / Denomi;
          T9 = -ueff / Denomi;
          dueff_dVg = T9 * dDenomi_dVg;
          dueff_dVd = T9 * dDenomi_dVd;
          dueff_dVb = T9 * dDenomi_dVb;
          /* Saturation Drain Voltage  Vdsat */
          /* Velocity-saturation field Esat = 2*vsat/ueff; EsatL = Esat*Leff
             and its bias derivatives feed Vdsat and the output-resistance
             terms below. */
          WVCox = Weff * here->BSIM4v7vsattemp * model->BSIM4v7coxe;
          WVCoxRds = WVCox * Rds;

          Esat = 2.0 * here->BSIM4v7vsattemp / ueff;
          here->BSIM4v7EsatL = EsatL = Esat * Leff;
          T0 = -EsatL /ueff;
          dEsatL_dVg = T0 * dueff_dVg;
          dEsatL_dVd = T0 * dueff_dVd;
          dEsatL_dVb = T0 * dueff_dVb;

          /* Sqrt() */
          /* Lambda: gate-bias-dependent velocity-saturation smoothing factor
             (a1/a2 parameters), with smooth clamping in both a1 > 0 and
             a1 < 0 branches. */
          a1 = pParam->BSIM4v7a1;
          if (a1 == 0.0)
          {   Lambda = pParam->BSIM4v7a2;
              dLambda_dVg = 0.0;
          }
          else if (a1 > 0.0)
          {   T0 = 1.0 - pParam->BSIM4v7a2;
              T1 = T0 - pParam->BSIM4v7a1 * Vgsteff - 0.0001;
              T2 = sqrt(T1 * T1 + 0.0004 * T0);
              Lambda = pParam->BSIM4v7a2 + T0 - 0.5 * (T1 + T2);
              dLambda_dVg = 0.5 * pParam->BSIM4v7a1 * (1.0 + T1 / T2);
          }
          else
          {   T1 = pParam->BSIM4v7a2 + pParam->BSIM4v7a1 * Vgsteff - 0.0001;
              T2 = sqrt(T1 * T1 + 0.0004 * pParam->BSIM4v7a2);
              Lambda = 0.5 * (T1 + T2);
              dLambda_dVg = 0.5 * pParam->BSIM4v7a1 * (1.0 + T1 / T2);
          }

          Vgst2Vtm = Vgsteff + 2.0 * Vtm;
          if (Rds > 0)
          {   tmp2 = dRds_dVg / Rds + dWeff_dVg / Weff;
              tmp3 = dRds_dVb / Rds + dWeff_dVb / Weff;
          }
          else
          {   tmp2 = dWeff_dVg / Weff;
              tmp3 = dWeff_dVb / Weff;
          }

          /* Vdsat: closed form when Rds=0 and Lambda=1; otherwise the
             quadratic solution Vdsat = (T1 - sqrt(T1^2 - 2*T0*T2)) / T0. */
          if ((Rds == 0.0) && (Lambda == 1.0))
          {   T0 = 1.0 / (Abulk * EsatL + Vgst2Vtm);
              tmp1 = 0.0;
              T1 = T0 * T0;
              T2 = Vgst2Vtm * T0;
              T3 = EsatL * Vgst2Vtm;
              Vdsat = T3 * T0;

              dT0_dVg = -(Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 1.0) * T1;
              dT0_dVd = -(Abulk * dEsatL_dVd) * T1;
              dT0_dVb = -(Abulk * dEsatL_dVb + dAbulk_dVb * EsatL) * T1;

              dVdsat_dVg = T3 * dT0_dVg + T2 * dEsatL_dVg + EsatL * T0;
              dVdsat_dVd = T3 * dT0_dVd + T2 * dEsatL_dVd;
              dVdsat_dVb = T3 * dT0_dVb + T2 * dEsatL_dVb;
          }
          else
          {   tmp1 = dLambda_dVg / (Lambda * Lambda);
              T9 = Abulk * WVCoxRds;
              T8 = Abulk * T9;
              T7 = Vgst2Vtm * T9;
              T6 = Vgst2Vtm * WVCoxRds;
              T0 = 2.0 * Abulk * (T9 - 1.0 + 1.0 / Lambda);
              dT0_dVg = 2.0 * (T8 * tmp2 - Abulk * tmp1
                      + (2.0 * T9 + 1.0 / Lambda - 1.0) * dAbulk_dVg);
              dT0_dVb = 2.0 * (T8 * (2.0 / Abulk * dAbulk_dVb + tmp3)
                      + (1.0 / Lambda - 1.0) * dAbulk_dVb);
              dT0_dVd = 0.0;
              T1 = Vgst2Vtm * (2.0 / Lambda - 1.0) + Abulk * EsatL + 3.0 * T7;

              dT1_dVg = (2.0 / Lambda - 1.0) - 2.0 * Vgst2Vtm * tmp1
                      + Abulk * dEsatL_dVg + EsatL * dAbulk_dVg + 3.0 * (T9
                      + T7 * tmp2 + T6 * dAbulk_dVg);
              dT1_dVb = Abulk * dEsatL_dVb + EsatL * dAbulk_dVb
                      + 3.0 * (T6 * dAbulk_dVb + T7 * tmp3);
              dT1_dVd = Abulk * dEsatL_dVd;

              T2 = Vgst2Vtm * (EsatL + 2.0 * T6);
              dT2_dVg = EsatL + Vgst2Vtm * dEsatL_dVg
                      + T6 * (4.0 + 2.0 * Vgst2Vtm * tmp2);
              dT2_dVb = Vgst2Vtm * (dEsatL_dVb + 2.0 * T6 * tmp3);
              dT2_dVd = Vgst2Vtm * dEsatL_dVd;

              T3 = sqrt(T1 * T1 - 2.0 * T0 * T2);
              Vdsat = (T1 - T3) / T0;

              dT3_dVg = (T1 * dT1_dVg - 2.0 * (T0 * dT2_dVg + T2 * dT0_dVg))
                      / T3;
              dT3_dVd = (T1 * dT1_dVd - 2.0 * (T0 * dT2_dVd + T2 * dT0_dVd))
                      / T3;
              dT3_dVb = (T1 * dT1_dVb - 2.0 * (T0 * dT2_dVb + T2 * dT0_dVb))
                      / T3;

              dVdsat_dVg = (dT1_dVg - (T1 * dT1_dVg - dT0_dVg * T2
                         - T0 * dT2_dVg) / T3 - Vdsat * dT0_dVg) / T0;
              dVdsat_dVb = (dT1_dVb - (T1 * dT1_dVb - dT0_dVb * T2
                         - T0 * dT2_dVb) / T3 - Vdsat * dT0_dVb) / T0;
              dVdsat_dVd = (dT1_dVd - (T1 * dT1_dVd - T0 * dT2_dVd) / T3) / T0;
          }
          here->BSIM4v7vdsat = Vdsat;
          /* Calculate Vdseff */
          /* Smooth minimum of Vds and Vdsat (delta-smoothing) so the I-V and
             its derivatives are continuous through the linear/saturation
             boundary. */
          T1 = Vdsat - Vds - pParam->BSIM4v7delta;
          dT1_dVg = dVdsat_dVg;
          dT1_dVd = dVdsat_dVd - 1.0;
          dT1_dVb = dVdsat_dVb;

          T2 = sqrt(T1 * T1 + 4.0 * pParam->BSIM4v7delta * Vdsat);
          T0 = T1 / T2;
          T9 = 2.0 * pParam->BSIM4v7delta;
          T3 = T9 / T2;
          dT2_dVg = T0 * dT1_dVg + T3 * dVdsat_dVg;
          dT2_dVd = T0 * dT1_dVd + T3 * dVdsat_dVd;
          dT2_dVb = T0 * dT1_dVb + T3 * dVdsat_dVb;

          if (T1 >= 0.0)
          {   Vdseff = Vdsat - 0.5 * (T1 + T2);
              dVdseff_dVg = dVdsat_dVg - 0.5 * (dT1_dVg + dT2_dVg);
              dVdseff_dVd = dVdsat_dVd - 0.5 * (dT1_dVd + dT2_dVd);
              dVdseff_dVb = dVdsat_dVb - 0.5 * (dT1_dVb + dT2_dVb);
          }
          else
          {   T4 = T9 / (T2 - T1);
              T5 = 1.0 - T4;
              T6 = Vdsat * T4 / (T2 - T1);
              Vdseff = Vdsat * T5;
              dVdseff_dVg = dVdsat_dVg * T5 + T6 * (dT2_dVg - dT1_dVg);
              dVdseff_dVd = dVdsat_dVd * T5 + T6 * (dT2_dVd - dT1_dVd);
              dVdseff_dVb = dVdsat_dVb * T5 + T6 * (dT2_dVb - dT1_dVb);
          }

          if (Vds == 0.0)
          {  Vdseff = 0.0;
             dVdseff_dVg = 0.0;
             dVdseff_dVb = 0.0;
          }

          /* Guard: the smoothing can slightly overshoot Vds; clamp it. */
          if (Vdseff > Vds)
              Vdseff = Vds;
          diffVds = Vds - Vdseff;
          here->BSIM4v7Vdseff = Vdseff;
          /* Velocity Overshoot */
          /* Optional (lambda-parameter) correction that scales EsatL (and its
             derivatives) by T10 = 1 + (lambda/(Leff*ueff)) * T8, where T8 is a
             smooth function of diffVds/(Esat*litl). */
          if((model->BSIM4v7lambdaGiven) && (model->BSIM4v7lambda > 0.0) )
          {
              T1 =  Leff * ueff;
              T2 = pParam->BSIM4v7lambda / T1;
              T3 = -T2 / T1 * Leff;
              dT2_dVd = T3 * dueff_dVd;
              dT2_dVg = T3 * dueff_dVg;
              dT2_dVb = T3 * dueff_dVb;

              T5 = 1.0 / (Esat * pParam->BSIM4v7litl);
              T4 = -T5 / EsatL;
              dT5_dVg = dEsatL_dVg * T4;
              dT5_dVd = dEsatL_dVd * T4;
              dT5_dVb = dEsatL_dVb * T4;

              T6 = 1.0 + diffVds  * T5;
              dT6_dVg = dT5_dVg * diffVds - dVdseff_dVg * T5;
              dT6_dVd = dT5_dVd * diffVds + (1.0 - dVdseff_dVd) * T5;
              dT6_dVb = dT5_dVb * diffVds - dVdseff_dVb * T5;

              T7 = 2.0 / (T6 * T6 + 1.0);
              T8 = 1.0 - T7;
              T9 = T6 * T7 * T7;
              dT8_dVg = T9 * dT6_dVg;
              dT8_dVd = T9 * dT6_dVd;
              dT8_dVb = T9 * dT6_dVb;

              T10 = 1.0 + T2 * T8;
              dT10_dVg = dT2_dVg * T8 + T2 * dT8_dVg;
              dT10_dVd = dT2_dVd * T8 + T2 * dT8_dVd;
              dT10_dVb = dT2_dVb * T8 + T2 * dT8_dVb;
              if(T10 == 1.0)
                  dT10_dVg = dT10_dVd = dT10_dVb = 0.0;

              dEsatL_dVg *= T10;
              dEsatL_dVg += EsatL * dT10_dVg;
              dEsatL_dVd *= T10;
              dEsatL_dVd += EsatL * dT10_dVd;
              dEsatL_dVb *= T10;
              dEsatL_dVb += EsatL * dT10_dVb;

              EsatL *= T10;
              Esat = EsatL / Leff; /* bugfix by Wenwei Yang (4.6.4) */
              here->BSIM4v7EsatL = EsatL;
          }
          /* Calculate Vasat */
          /* Early voltage at the saturation point: Vasat = T0 / T1 with
             tmp4 = 1 - 0.5*Abulk*Vdsat/Vgst2Vtm; feeds the Va used for CLM. */
          tmp4 = 1.0 - 0.5 * Abulk * Vdsat / Vgst2Vtm;
          T9 = WVCoxRds * Vgsteff;
          T8 = T9 / Vgst2Vtm;
          T0 = EsatL + Vdsat + 2.0 * T9 * tmp4;

          T7 = 2.0 * WVCoxRds * tmp4;
          dT0_dVg = dEsatL_dVg + dVdsat_dVg + T7 * (1.0 + tmp2 * Vgsteff)
                  - T8 * (Abulk * dVdsat_dVg - Abulk * Vdsat / Vgst2Vtm
                  + Vdsat * dAbulk_dVg);

          dT0_dVb = dEsatL_dVb + dVdsat_dVb + T7 * tmp3 * Vgsteff
                  - T8 * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb);
          dT0_dVd = dEsatL_dVd + dVdsat_dVd - T8 * Abulk * dVdsat_dVd;

          T9 = WVCoxRds * Abulk;
          T1 = 2.0 / Lambda - 1.0 + T9;
          dT1_dVg = -2.0 * tmp1 +  WVCoxRds * (Abulk * tmp2 + dAbulk_dVg);
          dT1_dVb = dAbulk_dVb * WVCoxRds + T9 * tmp3;

          Vasat = T0 / T1;
          dVasat_dVg = (dT0_dVg - Vasat * dT1_dVg) / T1;
          dVasat_dVb = (dT0_dVb - Vasat * dT1_dVb) / T1;
          dVasat_dVd = dT0_dVd / T1;
          /* Calculate Idl first */
          /* Effective oxide capacitance Coxeff from the charge-centroid
             thickness Tcen (ados/bdos quantum-correction parameters), then
             beta = ueff*Coxeff*Weff/Leff and the intrinsic current
             Idl = gche / (1 + gche*Rds). */
          tmp1 = here->BSIM4v7vtfbphi2;
          tmp2 = 2.0e8 * model->BSIM4v7toxp;
          dT0_dVg = 1.0 / tmp2;
          T0 = (Vgsteff + tmp1) * dT0_dVg;
          tmp3 = exp(model->BSIM4v7bdos * 0.7 * log(T0));  /* T0^(0.7*bdos) */
          T1 = 1.0 + tmp3;
          T2 = model->BSIM4v7bdos * 0.7 * tmp3 / T0;
          Tcen = model->BSIM4v7ados * 1.9e-9 / T1;
          dTcen_dVg = -Tcen * T2 * dT0_dVg / T1;

          Coxeff = epssub * model->BSIM4v7coxp
                 / (epssub + model->BSIM4v7coxp * Tcen);
          here->BSIM4v7Coxeff = Coxeff;
          dCoxeff_dVg = -Coxeff * Coxeff * dTcen_dVg / epssub;

          CoxeffWovL = Coxeff * Weff / Leff;
          beta = ueff * CoxeffWovL;
          T3 = ueff / Leff;
          dbeta_dVg = CoxeffWovL * dueff_dVg + T3
                    * (Weff * dCoxeff_dVg + Coxeff * dWeff_dVg);
          dbeta_dVd = CoxeffWovL * dueff_dVd;
          dbeta_dVb = CoxeffWovL * dueff_dVb + T3 * Coxeff * dWeff_dVb;

          /* Channel conductance gche = beta * fgche1 / fgche2, where
             fgche1 = Vgsteff*(1 - 0.5*Abulk*Vdseff/Vgst2Vtm) and
             fgche2 = 1 + Vdseff/EsatL (velocity saturation). */
          here->BSIM4v7AbovVgst2Vtm = Abulk / Vgst2Vtm;
          T0 = 1.0 - 0.5 * Vdseff * here->BSIM4v7AbovVgst2Vtm;
          dT0_dVg = -0.5 * (Abulk * dVdseff_dVg
                  - Abulk * Vdseff / Vgst2Vtm + Vdseff * dAbulk_dVg) / Vgst2Vtm;
          dT0_dVd = -0.5 * Abulk * dVdseff_dVd / Vgst2Vtm;
          dT0_dVb = -0.5 * (Abulk * dVdseff_dVb + dAbulk_dVb * Vdseff)
                  / Vgst2Vtm;

          fgche1 = Vgsteff * T0;
          dfgche1_dVg = Vgsteff * dT0_dVg + T0;
          dfgche1_dVd = Vgsteff * dT0_dVd;
          dfgche1_dVb = Vgsteff * dT0_dVb;

          T9 = Vdseff / EsatL;
          fgche2 = 1.0 + T9;
          dfgche2_dVg = (dVdseff_dVg - T9 * dEsatL_dVg) / EsatL;
          dfgche2_dVd = (dVdseff_dVd - T9 * dEsatL_dVd) / EsatL;
          dfgche2_dVb = (dVdseff_dVb - T9 * dEsatL_dVb) / EsatL;

          gche = beta * fgche1 / fgche2;
          dgche_dVg = (beta * dfgche1_dVg + fgche1 * dbeta_dVg
                    - gche * dfgche2_dVg) / fgche2;
          dgche_dVd = (beta * dfgche1_dVd + fgche1 * dbeta_dVd
                    - gche * dfgche2_dVd) / fgche2;
          dgche_dVb = (beta * dfgche1_dVb + fgche1 * dbeta_dVb
                    - gche * dfgche2_dVb) / fgche2;

          T0 = 1.0 + gche * Rds;
          Idl = gche / T0;
          T1 = (1.0 - Idl * Rds) / T0;
          T2 = Idl * Idl;
          dIdl_dVg = T1 * dgche_dVg - T2 * dRds_dVg;
          dIdl_dVd = T1 * dgche_dVd;
          dIdl_dVb = T1 * dgche_dVb - T2 * dRds_dVb;
          /* Calculate degradation factor due to pocket implant */
          /* FP = 1/(1 + fprout*sqrt(Leff)/Vgst2Vtm); scales several output-
             resistance terms below. */
          if (pParam->BSIM4v7fprout <= 0.0)
          {   FP = 1.0;
              dFP_dVg = 0.0;
          }
          else
          {   T9 = pParam->BSIM4v7fprout * sqrt(Leff) / Vgst2Vtm;
              FP = 1.0 / (1.0 + T9);
              dFP_dVg = FP * FP * T9 / Vgst2Vtm;
          }

          /* Calculate VACLM */
          /* PvagTerm = 1 + pvag*Vgsteff/EsatL, linearized below -0.9. */
          T8 = pParam->BSIM4v7pvag / EsatL;
          T9 = T8 * Vgsteff;
          if (T9 > -0.9)
          {   PvagTerm = 1.0 + T9;
              dPvagTerm_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL);
              dPvagTerm_dVb = -T9 * dEsatL_dVb / EsatL;
              dPvagTerm_dVd = -T9 * dEsatL_dVd / EsatL;
          }
          else
          {   T4 = 1.0 / (17.0 + 20.0 * T9);
              PvagTerm = (0.8 + T9) * T4;
              T4 *= T4;
              dPvagTerm_dVg = T8 * (1.0 - Vgsteff * dEsatL_dVg / EsatL) * T4;
              T9 *= T4 / EsatL;
              dPvagTerm_dVb = -T9 * dEsatL_dVb;
              dPvagTerm_dVd = -T9 * dEsatL_dVd;
          }

          /* Channel-length-modulation Early voltage VACLM = Cclm * diffVds;
             disabled (set to MAX_EXP) when pclm is tiny or the device is not
             past saturation. */
          if ((pParam->BSIM4v7pclm > MIN_EXP) && (diffVds > 1.0e-10))
          {   T0 = 1.0 + Rds * Idl;
              dT0_dVg = dRds_dVg * Idl + Rds * dIdl_dVg;
              dT0_dVd = Rds * dIdl_dVd;
              dT0_dVb = dRds_dVb * Idl + Rds * dIdl_dVb;

              T2 = Vdsat / Esat;
              T1 = Leff + T2;
              dT1_dVg = (dVdsat_dVg - T2 * dEsatL_dVg / Leff) / Esat;
              dT1_dVd = (dVdsat_dVd - T2 * dEsatL_dVd / Leff) / Esat;
              dT1_dVb = (dVdsat_dVb - T2 * dEsatL_dVb / Leff) / Esat;

              Cclm = FP * PvagTerm * T0 * T1 / (pParam->BSIM4v7pclm * pParam->BSIM4v7litl);
              dCclm_dVg = Cclm * (dFP_dVg / FP + dPvagTerm_dVg / PvagTerm
                        + dT0_dVg / T0 + dT1_dVg / T1);
              dCclm_dVb = Cclm * (dPvagTerm_dVb / PvagTerm + dT0_dVb / T0
                        + dT1_dVb / T1);
              dCclm_dVd = Cclm * (dPvagTerm_dVd / PvagTerm + dT0_dVd / T0
                        + dT1_dVd / T1);
              VACLM = Cclm * diffVds;

              dVACLM_dVg = dCclm_dVg * diffVds - dVdseff_dVg * Cclm;
              dVACLM_dVb = dCclm_dVb * diffVds - dVdseff_dVb * Cclm;
              dVACLM_dVd = dCclm_dVd * diffVds + (1.0 - dVdseff_dVd) * Cclm;
          }
          else
          {   VACLM = Cclm = MAX_EXP;
              dVACLM_dVd = dVACLM_dVg = dVACLM_dVb = 0.0;
              dCclm_dVd = dCclm_dVg = dCclm_dVb = 0.0;
          }
          /* Calculate VADIBL */
          /* DIBL Early voltage VADIBL = (Vgst2Vtm - T0/T1)/thetaRout,
             scaled by the pdiblb body-bias factor (linearized below -0.9)
             and by PvagTerm; MAX_EXP disables the effect. */
          if (pParam->BSIM4v7thetaRout > MIN_EXP)
          {   T8 = Abulk * Vdsat;
              T0 = Vgst2Vtm * T8;
              dT0_dVg = Vgst2Vtm * Abulk * dVdsat_dVg + T8
                      + Vgst2Vtm * Vdsat * dAbulk_dVg;
              dT0_dVb = Vgst2Vtm * (dAbulk_dVb * Vdsat + Abulk * dVdsat_dVb);
              dT0_dVd = Vgst2Vtm * Abulk * dVdsat_dVd;

              T1 = Vgst2Vtm + T8;
              dT1_dVg = 1.0 + Abulk * dVdsat_dVg + Vdsat * dAbulk_dVg;
              dT1_dVb = Abulk * dVdsat_dVb + dAbulk_dVb * Vdsat;
              dT1_dVd = Abulk * dVdsat_dVd;

              T9 = T1 * T1;
              T2 = pParam->BSIM4v7thetaRout;
              VADIBL = (Vgst2Vtm - T0 / T1) / T2;
              dVADIBL_dVg = (1.0 - dT0_dVg / T1 + T0 * dT1_dVg / T9) / T2;
              dVADIBL_dVb = (-dT0_dVb / T1 + T0 * dT1_dVb / T9) / T2;
              dVADIBL_dVd = (-dT0_dVd / T1 + T0 * dT1_dVd / T9) / T2;

              T7 = pParam->BSIM4v7pdiblb * Vbseff;
              if (T7 >= -0.9)
              {   T3 = 1.0 / (1.0 + T7);
                  VADIBL *= T3;
                  dVADIBL_dVg *= T3;
                  dVADIBL_dVb = (dVADIBL_dVb - VADIBL * pParam->BSIM4v7pdiblb)
                              * T3;
                  dVADIBL_dVd *= T3;
              }
              else
              {   T4 = 1.0 / (0.8 + T7);
                  T3 = (17.0 + 20.0 * T7) * T4;
                  dVADIBL_dVg *= T3;
                  dVADIBL_dVb = dVADIBL_dVb * T3
                              - VADIBL * pParam->BSIM4v7pdiblb * T4 * T4;
                  dVADIBL_dVd *= T3;
                  VADIBL *= T3;
              }

              dVADIBL_dVg = dVADIBL_dVg * PvagTerm + VADIBL * dPvagTerm_dVg;
              dVADIBL_dVb = dVADIBL_dVb * PvagTerm + VADIBL * dPvagTerm_dVb;
              dVADIBL_dVd = dVADIBL_dVd * PvagTerm + VADIBL * dPvagTerm_dVd;
              VADIBL *= PvagTerm;
          }
          else
          {   VADIBL = MAX_EXP;
              dVADIBL_dVd = dVADIBL_dVg = dVADIBL_dVb = 0.0;
          }
          /* Calculate Va */
          /* Total Early voltage for the linear/CLM region. */
          Va = Vasat + VACLM;
          dVa_dVg = dVasat_dVg + dVACLM_dVg;
          dVa_dVb = dVasat_dVb + dVACLM_dVb;
          dVa_dVd = dVasat_dVd + dVACLM_dVd;

          /* Calculate VADITS */
          /* Drain-induced-threshold-shift Early voltage; MAX_EXP disables. */
          T0 = pParam->BSIM4v7pditsd * Vds;
          if (T0 > EXP_THRESHOLD)
          {   T1 = MAX_EXP;
              dT1_dVd = 0;
          }
          else
          {   T1 = exp(T0);
              dT1_dVd = T1 * pParam->BSIM4v7pditsd;
          }

          if (pParam->BSIM4v7pdits > MIN_EXP)
          {   T2 = 1.0 + model->BSIM4v7pditsl * Leff;
              VADITS = (1.0 + T2 * T1) / pParam->BSIM4v7pdits;
              dVADITS_dVg = VADITS * dFP_dVg;
              dVADITS_dVd = FP * T2 * dT1_dVd / pParam->BSIM4v7pdits;
              VADITS *= FP;
          }
          else
          {   VADITS = MAX_EXP;
              dVADITS_dVg = dVADITS_dVd = 0;
          }

          /* Calculate VASCBE */
          /* Substrate-current-induced body effect Early voltage
             VASCBE = (Leff/pscbe2) * exp(pscbe1*litl/diffVds), with the
             exponent clamped at EXP_THRESHOLD. */
          if ((pParam->BSIM4v7pscbe2 > 0.0)&&(pParam->BSIM4v7pscbe1>=0.0)) /*4.6.2*/
          {   if (diffVds > pParam->BSIM4v7pscbe1 * pParam->BSIM4v7litl
                  / EXP_THRESHOLD)
              {   T0 =  pParam->BSIM4v7pscbe1 * pParam->BSIM4v7litl / diffVds;
                  VASCBE = Leff * exp(T0) / pParam->BSIM4v7pscbe2;
                  T1 = T0 * VASCBE / diffVds;
                  dVASCBE_dVg = T1 * dVdseff_dVg;
                  dVASCBE_dVd = -T1 * (1.0 - dVdseff_dVd);
                  dVASCBE_dVb = T1 * dVdseff_dVb;
              }
              else
              {   VASCBE = MAX_EXP * Leff/pParam->BSIM4v7pscbe2;
                  dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0;
              }
          }
          else
          {   VASCBE = MAX_EXP;
              dVASCBE_dVg = dVASCBE_dVd = dVASCBE_dVb = 0.0;
          }
          /* Add DIBL to Ids */
          /* Each output-resistance effect multiplies the current by
             (1 + diffVds/VA_x); derivatives are chain-ruled in the same step. */
          T9 = diffVds / VADIBL;
          T0 = 1.0 + T9;
          Idsa = Idl * T0;
          dIdsa_dVg = T0 * dIdl_dVg - Idl * (dVdseff_dVg + T9 * dVADIBL_dVg) / VADIBL;
          dIdsa_dVd = T0 * dIdl_dVd + Idl
                    * (1.0 - dVdseff_dVd - T9 * dVADIBL_dVd) / VADIBL;
          dIdsa_dVb = T0 * dIdl_dVb - Idl * (dVdseff_dVb + T9 * dVADIBL_dVb) / VADIBL;

          /* Add DITS to Ids */
          /* NOTE(review): VADITS has no Vb dependence above, so the dVb line
             carries only the dVdseff_dVb term. */
          T9 = diffVds / VADITS;
          T0 = 1.0 + T9;
          dIdsa_dVg = T0 * dIdsa_dVg - Idsa * (dVdseff_dVg + T9 * dVADITS_dVg) / VADITS;
          dIdsa_dVd = T0 * dIdsa_dVd + Idsa
                    * (1.0 - dVdseff_dVd - T9 * dVADITS_dVd) / VADITS;
          dIdsa_dVb = T0 * dIdsa_dVb - Idsa * dVdseff_dVb / VADITS;
          Idsa *= T0;

          /* Add CLM to Ids */
          /* CLM enters as a log factor: Ids *= 1 + ln(Va/Vasat)/Cclm. */
          T0 = log(Va / Vasat);
          dT0_dVg = dVa_dVg / Va - dVasat_dVg / Vasat;
          dT0_dVb = dVa_dVb / Va - dVasat_dVb / Vasat;
          dT0_dVd = dVa_dVd / Va - dVasat_dVd / Vasat;
          T1 = T0 / Cclm;
          T9 = 1.0 + T1;
          dT9_dVg = (dT0_dVg - T1 * dCclm_dVg) / Cclm;
          dT9_dVb = (dT0_dVb - T1 * dCclm_dVb) / Cclm;
          dT9_dVd = (dT0_dVd - T1 * dCclm_dVd) / Cclm;

          dIdsa_dVg = dIdsa_dVg * T9 + Idsa * dT9_dVg;
          dIdsa_dVb = dIdsa_dVb * T9 + Idsa * dT9_dVb;
          dIdsa_dVd = dIdsa_dVd * T9 + Idsa * dT9_dVd;
          Idsa *= T9;
          /* Substrate current begins */
          /* Impact-ionization current Isub = (alpha0+alpha1*Leff)/Leff *
             diffVds * exp(-beta0/diffVds) * Idsa * Vdseff, with clamping of
             the exponential.  Gbg/Gbd/Gbb are its terminal conductances,
             finally remapped through the Vgsteff/Vbseff chain rules. */
          tmp = pParam->BSIM4v7alpha0 + pParam->BSIM4v7alpha1 * Leff;
          if ((tmp <= 0.0) || (pParam->BSIM4v7beta0 <= 0.0))
          {   Isub = Gbd = Gbb = Gbg = 0.0;
          }
          else
          {   T2 = tmp / Leff;
              if (diffVds > pParam->BSIM4v7beta0 / EXP_THRESHOLD)
              {   T0 = -pParam->BSIM4v7beta0 / diffVds;
                  T1 = T2 * diffVds * exp(T0);
                  T3 = T1 / diffVds * (T0 - 1.0);
                  dT1_dVg = T3 * dVdseff_dVg;
                  dT1_dVd = T3 * (dVdseff_dVd - 1.0);
                  dT1_dVb = T3 * dVdseff_dVb;
              }
              else
              {   T3 = T2 * MIN_EXP;
                  T1 = T3 * diffVds;
                  dT1_dVg = -T3 * dVdseff_dVg;
                  dT1_dVd = T3 * (1.0 - dVdseff_dVd);
                  dT1_dVb = -T3 * dVdseff_dVb;
              }
              T4 = Idsa * Vdseff;
              Isub = T1 * T4;
              Gbg = T1 * (dIdsa_dVg * Vdseff + Idsa * dVdseff_dVg)
                  + T4 * dT1_dVg;
              Gbd = T1 * (dIdsa_dVd * Vdseff + Idsa * dVdseff_dVd)
                  + T4 * dT1_dVd;
              Gbb = T1 * (dIdsa_dVb * Vdseff + Idsa * dVdseff_dVb)
                  + T4 * dT1_dVb;

              Gbd += Gbg * dVgsteff_dVd;
              Gbb += Gbg * dVgsteff_dVb;
              Gbg *= dVgsteff_dVg;
              Gbb *= dVbseff_dVb;
          }
          here->BSIM4v7csub = Isub;
          here->BSIM4v7gbbs = Gbb;
          here->BSIM4v7gbgs = Gbg;
          here->BSIM4v7gbds = Gbd;
          /* Add SCBE to Ids */
          /* Final SCBE multiplication on the per-unit-Vdseff current, then the
             terminal transconductances: Gm/Gds/Gmb are first computed w.r.t.
             the internal variables and then remapped through the
             Vgsteff/Vbseff chain rules.  cdrain = Ids * Vdseff is the drain
             current (Ids here is current per Vdseff). */
          T9 = diffVds / VASCBE;
          T0 = 1.0 + T9;
          Ids = Idsa * T0;

          Gm = T0 * dIdsa_dVg - Idsa
             * (dVdseff_dVg + T9 * dVASCBE_dVg) / VASCBE;
          Gds = T0 * dIdsa_dVd + Idsa
              * (1.0 - dVdseff_dVd - T9 * dVASCBE_dVd) / VASCBE;
          Gmb = T0 * dIdsa_dVb - Idsa
              * (dVdseff_dVb + T9 * dVASCBE_dVb) / VASCBE;

          tmp1 = Gds + Gm * dVgsteff_dVd;
          tmp2 = Gmb + Gm * dVgsteff_dVb;
          tmp3 = Gm;

          Gm = (Ids * dVdseff_dVg + Vdseff * tmp3) * dVgsteff_dVg;
          Gds = Ids * (dVdseff_dVd + dVdseff_dVg * dVgsteff_dVd)
              + Vdseff * tmp1;
          Gmb = (Ids * (dVdseff_dVb + dVdseff_dVg * dVgsteff_dVb)
              + Vdseff * tmp2) * dVbseff_dVb;

          cdrain = Ids * Vdseff;
          /* Source End Velocity Limit  */
          /* Optional (vtl-parameter) limit: computes the source-end carrier
             velocity vs = cdrain/(Leff*Coxeff*W/L*Vgsteff) and scales the
             current and conductances by Fsevl = (1 + (vs/(vtl*tfactor))^(2*MM))^(-1/(2*MM)). */
          if((model->BSIM4v7vtlGiven) && (model->BSIM4v7vtl > 0.0) )
          {
              T12 = 1.0 / Leff / CoxeffWovL;
              T11 = T12 / Vgsteff;
              T10 = -T11 / Vgsteff;
              vs = cdrain * T11; /* vs */
              dvs_dVg = Gm * T11 + cdrain * T10 * dVgsteff_dVg;
              dvs_dVd = Gds * T11 + cdrain * T10 * dVgsteff_dVd;
              dvs_dVb = Gmb * T11 + cdrain * T10 * dVgsteff_dVb;
              T0 = 2 * MM;
              T1 = vs / (pParam->BSIM4v7vtl * pParam->BSIM4v7tfactor);
              if(T1 > 0.0)
              {   T2 = 1.0 + exp(T0 * log(T1));  /* 1 + T1^(2*MM) */
                  T3 = (T2 - 1.0) * T0 / vs;
                  Fsevl = 1.0 / exp(log(T2)/ T0);  /* T2^(-1/(2*MM)) */
                  dT2_dVg = T3 * dvs_dVg;
                  dT2_dVd = T3 * dvs_dVd;
                  dT2_dVb = T3 * dvs_dVb;
                  T4 = -1.0 / T0 * Fsevl / T2;
                  dFsevl_dVg = T4 * dT2_dVg;
                  dFsevl_dVd = T4 * dT2_dVd;
                  dFsevl_dVb = T4 * dT2_dVb;
              } else {
                  Fsevl = 1.0;
                  dFsevl_dVg = 0.0;
                  dFsevl_dVd = 0.0;
                  dFsevl_dVb = 0.0;
              }
              Gm *=Fsevl;
              Gm += cdrain * dFsevl_dVg;
              Gmb *=Fsevl;
              Gmb += cdrain * dFsevl_dVb;
              Gds *=Fsevl;
              Gds += cdrain * dFsevl_dVd;
              cdrain *= Fsevl;
          }

          /* Store the final small-signal outputs; IdovVds is floored at 1e-9
             for numerical safety downstream. */
          here->BSIM4v7gds = Gds;
          here->BSIM4v7gm = Gm;
          here->BSIM4v7gmbs = Gmb;
          here->BSIM4v7IdovVds = Ids;
          if( here->BSIM4v7IdovVds <= 1.0e-9) here->BSIM4v7IdovVds = 1.0e-9;
/* Calculate Rg */
if ((here->BSIM4v7rgateMod > 1) ||
(here->BSIM4v7trnqsMod != 0) || (here->BSIM4v7acnqsMod != 0))
{ T9 = pParam->BSIM4v7xrcrg2 * model->BSIM4v7vtm;
T0 = T9 * beta;
dT0_dVd = (dbeta_dVd + dbeta_dVg * dVgsteff_dVd) * T9;
dT0_dVb = (dbeta_dVb + dbeta_dVg * dVgsteff_dVb) * T9;
dT0_dVg = dbeta_dVg * T9;
here->BSIM4v7gcrg = pParam->BSIM4v7xrcrg1 * ( T0 + Ids);
here->BSIM4v7gcrgd = pParam->BSIM4v7xrcrg1 * (dT0_dVd + tmp1);
here->BSIM4v7gcrgb = pParam->BSIM4v7xrcrg1 * (dT0_dVb + tmp2)
* dVbseff_dVb;
here->BSIM4v7gcrgg = pParam->BSIM4v7xrcrg1 * (dT0_dVg + tmp3)
* dVgsteff_dVg;
if (here->BSIM4v7nf != 1.0)
{ here->BSIM4v7gcrg *= here->BSIM4v7nf;
here->BSIM4v7gcrgg *= here->BSIM4v7nf;
here->BSIM4v7gcrgd *= here->BSIM4v7nf;
here->BSIM4v7gcrgb *= here->BSIM4v7nf;
}
if (here->BSIM4v7rgateMod == 2)
{ T10 = here->BSIM4v7grgeltd * here->BSIM4v7grgeltd;
T11 = here->BSIM4v7grgeltd + here->BSIM4v7gcrg;
here->BSIM4v7gcrg = here->BSIM4v7grgeltd * here->BSIM4v7gcrg / T11;
T12 = T10 / T11 / T11;
here->BSIM4v7gcrgg *= T12;
here->BSIM4v7gcrgd *= T12;
here->BSIM4v7gcrgb *= T12;
}
here->BSIM4v7gcrgs = -(here->BSIM4v7gcrgg + here->BSIM4v7gcrgd
+ here->BSIM4v7gcrgb);
}
/* Calculate bias-dependent external S/D resistance */
if (model->BSIM4v7rdsMod)
{ /* Rs(V) */
T0 = vgs - pParam->BSIM4v7vfbsd;
T1 = sqrt(T0 * T0 + 1.0e-4);
vgs_eff = 0.5 * (T0 + T1);
dvgs_eff_dvg = vgs_eff / T1;
T0 = 1.0 + pParam->BSIM4v7prwg * vgs_eff;
dT0_dvg = -pParam->BSIM4v7prwg / T0 / T0 * dvgs_eff_dvg;
T1 = -pParam->BSIM4v7prwb * vbs;
dT1_dvb = -pParam->BSIM4v7prwb;
T2 = 1.0 / T0 + T1;
T3 = T2 + sqrt(T2 * T2 + 0.01);
dT3_dvg = T3 / (T3 - T2);
dT3_dvb = dT3_dvg * dT1_dvb;
dT3_dvg *= dT0_dvg;
T4 = pParam->BSIM4v7rs0 * 0.5;
Rs = pParam->BSIM4v7rswmin + T3 * T4;
dRs_dvg = T4 * dT3_dvg;
dRs_dvb = T4 * dT3_dvb;
T0 = 1.0 + here->BSIM4v7sourceConductance * Rs;
here->BSIM4v7gstot = here->BSIM4v7sourceConductance / T0;
T0 = -here->BSIM4v7gstot * here->BSIM4v7gstot;
dgstot_dvd = 0.0; /* place holder */
dgstot_dvg = T0 * dRs_dvg;
dgstot_dvb = T0 * dRs_dvb;
dgstot_dvs = -(dgstot_dvg + dgstot_dvb + dgstot_dvd);
/* Rd(V) */
T0 = vgd - pParam->BSIM4v7vfbsd;
T1 = sqrt(T0 * T0 + 1.0e-4);
vgd_eff = 0.5 * (T0 + T1);
dvgd_eff_dvg = vgd_eff / T1;
T0 = 1.0 + pParam->BSIM4v7prwg * vgd_eff;
dT0_dvg = -pParam->BSIM4v7prwg / T0 / T0 * dvgd_eff_dvg;
T1 = -pParam->BSIM4v7prwb * vbd;
dT1_dvb = -pParam->BSIM4v7prwb;
T2 = 1.0 / T0 + T1;
T3 = T2 + sqrt(T2 * T2 + 0.01);
dT3_dvg = T3 / (T3 - T2);
dT3_dvb = dT3_dvg * dT1_dvb;
dT3_dvg *= dT0_dvg;
T4 = pParam->BSIM4v7rd0 * 0.5;
Rd = pParam->BSIM4v7rdwmin + T3 * T4;
dRd_dvg = T4 * dT3_dvg;
dRd_dvb = T4 * dT3_dvb;
T0 = 1.0 + here->BSIM4v7drainConductance * Rd;
here->BSIM4v7gdtot = here->BSIM4v7drainConductance / T0;
T0 = -here->BSIM4v7gdtot * here->BSIM4v7gdtot;
dgdtot_dvs = 0.0;
dgdtot_dvg = T0 * dRd_dvg;
dgdtot_dvb = T0 * dRd_dvb;
dgdtot_dvd = -(dgdtot_dvg + dgdtot_dvb + dgdtot_dvs);
here->BSIM4v7gstotd = vses * dgstot_dvd;
here->BSIM4v7gstotg = vses * dgstot_dvg;
here->BSIM4v7gstots = vses * dgstot_dvs;
here->BSIM4v7gstotb = vses * dgstot_dvb;
T2 = vdes - vds;
here->BSIM4v7gdtotd = T2 * dgdtot_dvd;
here->BSIM4v7gdtotg = T2 * dgdtot_dvg;
here->BSIM4v7gdtots = T2 * dgdtot_dvs;
here->BSIM4v7gdtotb = T2 * dgdtot_dvb;
}
else /* WDLiu: for bypass */
{ here->BSIM4v7gstot = here->BSIM4v7gstotd = here->BSIM4v7gstotg = 0.0;
here->BSIM4v7gstots = here->BSIM4v7gstotb = 0.0;
here->BSIM4v7gdtot = here->BSIM4v7gdtotd = here->BSIM4v7gdtotg = 0.0;
here->BSIM4v7gdtots = here->BSIM4v7gdtotb = 0.0;
}
/* GIDL/GISL Models */
if(model->BSIM4v7mtrlMod == 0)
T0 = 3.0 * toxe;
else
T0 = model->BSIM4v7epsrsub * toxe / epsrox;
/* Calculate GIDL current */
vgs_eff = here->BSIM4v7vgs_eff;
dvgs_eff_dvg = here->BSIM4v7dvgs_eff_dvg;
vgd_eff = here->BSIM4v7vgd_eff;
dvgd_eff_dvg = here->BSIM4v7dvgd_eff_dvg;
if (model->BSIM4v7gidlMod==0){
if(model->BSIM4v7mtrlMod ==0)
T1 = (vds - vgs_eff - pParam->BSIM4v7egidl ) / T0;
else
T1 = (vds - vgs_eff - pParam->BSIM4v7egidl + pParam->BSIM4v7vfbsd) / T0;
if ((pParam->BSIM4v7agidl <= 0.0) || (pParam->BSIM4v7bgidl <= 0.0)
|| (T1 <= 0.0) || (pParam->BSIM4v7cgidl <= 0.0) || (vbd > 0.0))
Igidl = Ggidld = Ggidlg = Ggidlb = 0.0;
else {
dT1_dVd = 1.0 / T0;
dT1_dVg = -dvgs_eff_dvg * dT1_dVd;
T2 = pParam->BSIM4v7bgidl / T1;
if (T2 < 100.0)
{ Igidl = pParam->BSIM4v7agidl * pParam->BSIM4v7weffCJ * T1 * exp(-T2);
T3 = Igidl * (1.0 + T2) / T1;
Ggidld = T3 * dT1_dVd;
Ggidlg = T3 * dT1_dVg;
}
else
{ Igidl = pParam->BSIM4v7agidl * pParam->BSIM4v7weffCJ * 3.720075976e-44;
Ggidld = Igidl * dT1_dVd;
Ggidlg = Igidl * dT1_dVg;
Igidl *= T1;
}
T4 = vbd * vbd;
T5 = -vbd * T4;
T6 = pParam->BSIM4v7cgidl + T5;
T7 = T5 / T6;
T8 = 3.0 * pParam->BSIM4v7cgidl * T4 / T6 / T6;
Ggidld = Ggidld * T7 + Igidl * T8;
Ggidlg = Ggidlg * T7;
Ggidlb = -Igidl * T8;
Igidl *= T7;
}
here->BSIM4v7Igidl = Igidl;
here->BSIM4v7ggidld = Ggidld;
here->BSIM4v7ggidlg = Ggidlg;
here->BSIM4v7ggidlb = Ggidlb;
/* Calculate GISL current */
if(model->BSIM4v7mtrlMod ==0)
T1 = (-vds - vgd_eff - pParam->BSIM4v7egisl ) / T0;
else
T1 = (-vds - vgd_eff - pParam->BSIM4v7egisl + pParam->BSIM4v7vfbsd ) / T0;
if ((pParam->BSIM4v7agisl <= 0.0) || (pParam->BSIM4v7bgisl <= 0.0)
|| (T1 <= 0.0) || (pParam->BSIM4v7cgisl <= 0.0) || (vbs > 0.0))
Igisl = Ggisls = Ggislg = Ggislb = 0.0;
else {
dT1_dVd = 1.0 / T0;
dT1_dVg = -dvgd_eff_dvg * dT1_dVd;
T2 = pParam->BSIM4v7bgisl / T1;
if (T2 < 100.0)
{ Igisl = pParam->BSIM4v7agisl * pParam->BSIM4v7weffCJ * T1 * exp(-T2);
T3 = Igisl * (1.0 + T2) / T1;
Ggisls = T3 * dT1_dVd;
Ggislg = T3 * dT1_dVg;
}
else
{ Igisl = pParam->BSIM4v7agisl * pParam->BSIM4v7weffCJ * 3.720075976e-44;
Ggisls = Igisl * dT1_dVd;
Ggislg = Igisl * dT1_dVg;
Igisl *= T1;
}
T4 = vbs * vbs;
T5 = -vbs * T4;
T6 = pParam->BSIM4v7cgisl + T5;
T7 = T5 / T6;
T8 = 3.0 * pParam->BSIM4v7cgisl * T4 / T6 / T6;
Ggisls = Ggisls * T7 + Igisl * T8;
Ggislg = Ggislg * T7;
Ggislb = -Igisl * T8;
Igisl *= T7;
}
here->BSIM4v7Igisl = Igisl;
here->BSIM4v7ggisls = Ggisls;
here->BSIM4v7ggislg = Ggislg;
here->BSIM4v7ggislb = Ggislb;
}
else{
/* v4.7 New Gidl/GISL model */
/* GISL */
if (model->BSIM4v7mtrlMod==0)
T1 = (-vds - pParam->BSIM4v7rgisl * vgd_eff - pParam->BSIM4v7egisl) / T0;
else
T1 = (-vds - pParam->BSIM4v7rgisl * vgd_eff - pParam->BSIM4v7egisl + pParam->BSIM4v7vfbsd) / T0;
if ((pParam->BSIM4v7agisl <= 0.0) ||
(pParam->BSIM4v7bgisl <= 0.0) || (T1 <= 0.0) ||
(pParam->BSIM4v7cgisl < 0.0) )
Igisl = Ggisls = Ggislg = Ggislb = 0.0;
else
{
dT1_dVd = 1 / T0;
dT1_dVg = - pParam->BSIM4v7rgisl * dT1_dVd * dvgd_eff_dvg;
T2 = pParam->BSIM4v7bgisl / T1;
if (T2 < EXPL_THRESHOLD)
{
Igisl = pParam->BSIM4v7weffCJ * pParam->BSIM4v7agisl * T1 * exp(-T2);
T3 = Igisl / T1 * (T2 + 1);
Ggisls = T3 * dT1_dVd;
Ggislg = T3 * dT1_dVg;
}
else
{
T3 = pParam->BSIM4v7weffCJ * pParam->BSIM4v7agisl * MIN_EXPL;
Igisl = T3 * T1 ;
Ggisls = T3 * dT1_dVd;
Ggislg = T3 * dT1_dVg;
}
T4 = vbs - pParam->BSIM4v7fgisl;
if (T4==0)
T5 = EXPL_THRESHOLD;
else
T5 = pParam->BSIM4v7kgisl / T4;
if (T5<EXPL_THRESHOLD)
{T6 = exp(T5);
Ggislb = -Igisl * T6 * T5 / T4;
}
else
{T6 = MAX_EXPL;
Ggislb=0.0;
}
Ggisls*=T6;
Ggislg*=T6;
Igisl*=T6;
}
here->BSIM4v7Igisl = Igisl;
here->BSIM4v7ggisls = Ggisls;
here->BSIM4v7ggislg = Ggislg;
here->BSIM4v7ggislb = Ggislb;
/* End of GISL */
/* GIDL */
if (model->BSIM4v7mtrlMod==0)
T1 = (vds - pParam->BSIM4v7rgidl * vgs_eff - pParam->BSIM4v7egidl) / T0;
else
T1 = (vds - pParam->BSIM4v7rgidl * vgs_eff - pParam->BSIM4v7egidl + pParam->BSIM4v7vfbsd) / T0;
if ((pParam->BSIM4v7agidl <= 0.0) ||
(pParam->BSIM4v7bgidl <= 0.0) || (T1 <= 0.0) ||
(pParam->BSIM4v7cgidl < 0.0) )
Igidl = Ggidld = Ggidlg = Ggidlb = 0.0;
else
{
dT1_dVd = 1 / T0;
dT1_dVg = - pParam->BSIM4v7rgidl * dT1_dVd * dvgs_eff_dvg;
T2 = pParam->BSIM4v7bgidl / T1;
if (T2 < EXPL_THRESHOLD)
{
Igidl = pParam->BSIM4v7weffCJ * pParam->BSIM4v7agidl * T1 * exp(-T2);
T3 = Igidl / T1 * (T2 + 1);
Ggidld = T3 * dT1_dVd;
Ggidlg = T3 * dT1_dVg;
} else
{
T3 = pParam->BSIM4v7weffCJ * pParam->BSIM4v7agidl * MIN_EXPL;
Igidl = T3 * T1 ;
Ggidld = T3 * dT1_dVd;
Ggidlg = T3 * dT1_dVg;
}
T4 = vbd - pParam->BSIM4v7fgidl;
if (T4==0)
T5 = EXPL_THRESHOLD;
else
T5 = pParam->BSIM4v7kgidl / T4;
if (T5<EXPL_THRESHOLD)
{T6 = exp(T5);
Ggidlb = -Igidl * T6 * T5 / T4;
}
else
{T6 = MAX_EXPL;
Ggidlb=0.0;
}
Ggidld *= T6;
Ggidlg *= T6;
Igidl *= T6;
}
here->BSIM4v7Igidl = Igidl;
here->BSIM4v7ggidld = Ggidld;
here->BSIM4v7ggidlg = Ggidlg;
here->BSIM4v7ggidlb = Ggidlb;
/* End of New GIDL */
}
/*End of Gidl*/
/* Calculate gate tunneling current */
if ((model->BSIM4v7igcMod != 0) || (model->BSIM4v7igbMod != 0))
{ Vfb = here->BSIM4v7vfbzb;
V3 = Vfb - Vgs_eff + Vbseff - DELTA_3;
if (Vfb <= 0.0)
T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * Vfb);
else
T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfb);
T1 = 0.5 * (1.0 + V3 / T0);
Vfbeff = Vfb - 0.5 * (V3 + T0);
dVfbeff_dVg = T1 * dVgs_eff_dVg;
dVfbeff_dVb = -T1; /* WDLiu: -No surprise? No. -Good! */
Voxacc = Vfb - Vfbeff;
dVoxacc_dVg = -dVfbeff_dVg;
dVoxacc_dVb = -dVfbeff_dVb;
if (Voxacc < 0.0) /* WDLiu: Avoiding numerical instability. */
Voxacc = dVoxacc_dVg = dVoxacc_dVb = 0.0;
T0 = 0.5 * pParam->BSIM4v7k1ox;
T3 = Vgs_eff - Vfbeff - Vbseff - Vgsteff;
if (pParam->BSIM4v7k1ox == 0.0)
Voxdepinv = dVoxdepinv_dVg = dVoxdepinv_dVd
= dVoxdepinv_dVb = 0.0;
else if (T3 < 0.0)
{ Voxdepinv = -T3;
dVoxdepinv_dVg = -dVgs_eff_dVg + dVfbeff_dVg
+ dVgsteff_dVg;
dVoxdepinv_dVd = dVgsteff_dVd;
dVoxdepinv_dVb = dVfbeff_dVb + 1.0 + dVgsteff_dVb;
}
else
{ T1 = sqrt(T0 * T0 + T3);
T2 = T0 / T1;
Voxdepinv = pParam->BSIM4v7k1ox * (T1 - T0);
dVoxdepinv_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg
- dVgsteff_dVg);
dVoxdepinv_dVd = -T2 * dVgsteff_dVd;
dVoxdepinv_dVb = -T2 * (dVfbeff_dVb + 1.0 + dVgsteff_dVb);
}
Voxdepinv += Vgsteff;
dVoxdepinv_dVg += dVgsteff_dVg;
dVoxdepinv_dVd += dVgsteff_dVd;
dVoxdepinv_dVb += dVgsteff_dVb;
}
if(model->BSIM4v7tempMod < 2)
tmp = Vtm;
else /* model->BSIM4v7tempMod = 2 , 3*/
tmp = Vtm0;
if (model->BSIM4v7igcMod)
{ T0 = tmp * pParam->BSIM4v7nigc;
if(model->BSIM4v7igcMod == 1) {
VxNVt = (Vgs_eff - model->BSIM4v7type * here->BSIM4v7vth0) / T0;
if (VxNVt > EXP_THRESHOLD)
{ Vaux = Vgs_eff - model->BSIM4v7type * here->BSIM4v7vth0;
dVaux_dVg = dVgs_eff_dVg;
dVaux_dVd = 0.0;
dVaux_dVb = 0.0;
}
} else if (model->BSIM4v7igcMod == 2) {
VxNVt = (Vgs_eff - here->BSIM4v7von) / T0;
if (VxNVt > EXP_THRESHOLD)
{ Vaux = Vgs_eff - here->BSIM4v7von;
dVaux_dVg = dVgs_eff_dVg;
dVaux_dVd = -dVth_dVd;
dVaux_dVb = -dVth_dVb;
}
}
if (VxNVt < -EXP_THRESHOLD)
{ Vaux = T0 * log(1.0 + MIN_EXP);
dVaux_dVg = dVaux_dVd = dVaux_dVb = 0.0;
}
else if ((VxNVt >= -EXP_THRESHOLD) && (VxNVt <= EXP_THRESHOLD))
{ ExpVxNVt = exp(VxNVt);
Vaux = T0 * log(1.0 + ExpVxNVt);
dVaux_dVg = ExpVxNVt / (1.0 + ExpVxNVt);
if(model->BSIM4v7igcMod == 1) {
dVaux_dVd = 0.0;
dVaux_dVb = 0.0;
} else if (model->BSIM4v7igcMod == 2) {
dVaux_dVd = -dVgs_eff_dVg * dVth_dVd;
dVaux_dVb = -dVgs_eff_dVg * dVth_dVb;
}
dVaux_dVg *= dVgs_eff_dVg;
}
T2 = Vgs_eff * Vaux;
dT2_dVg = dVgs_eff_dVg * Vaux + Vgs_eff * dVaux_dVg;
dT2_dVd = Vgs_eff * dVaux_dVd;
dT2_dVb = Vgs_eff * dVaux_dVb;
T11 = pParam->BSIM4v7Aechvb;
T12 = pParam->BSIM4v7Bechvb;
T3 = pParam->BSIM4v7aigc * pParam->BSIM4v7cigc
- pParam->BSIM4v7bigc;
T4 = pParam->BSIM4v7bigc * pParam->BSIM4v7cigc;
T5 = T12 * (pParam->BSIM4v7aigc + T3 * Voxdepinv
- T4 * Voxdepinv * Voxdepinv);
if (T5 > EXP_THRESHOLD)
{ T6 = MAX_EXP;
dT6_dVg = dT6_dVd = dT6_dVb = 0.0;
}
else if (T5 < -EXP_THRESHOLD)
{ T6 = MIN_EXP;
dT6_dVg = dT6_dVd = dT6_dVb = 0.0;
}
else
{ T6 = exp(T5);
dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxdepinv);
dT6_dVd = dT6_dVg * dVoxdepinv_dVd;
dT6_dVb = dT6_dVg * dVoxdepinv_dVb;
dT6_dVg *= dVoxdepinv_dVg;
}
Igc = T11 * T2 * T6;
dIgc_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg);
dIgc_dVd = T11 * (T2 * dT6_dVd + T6 * dT2_dVd);
dIgc_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb);
if (model->BSIM4v7pigcdGiven)
{ Pigcd = pParam->BSIM4v7pigcd;
dPigcd_dVg = dPigcd_dVd = dPigcd_dVb = 0.0;
}
else
{ /* T11 = pParam->BSIM4v7Bechvb * toxe; v4.7 */
T11 = -pParam->BSIM4v7Bechvb;
T12 = Vgsteff + 1.0e-20;
T13 = T11 / T12 / T12;
T14 = -T13 / T12;
Pigcd = T13 * (1.0 - 0.5 * Vdseff / T12);
dPigcd_dVg = T14 * (2.0 + 0.5 * (dVdseff_dVg
- 3.0 * Vdseff / T12));
dPigcd_dVd = 0.5 * T14 * dVdseff_dVd;
dPigcd_dVb = 0.5 * T14 * dVdseff_dVb;
}
T7 = -Pigcd * Vdseff; /* bugfix */
dT7_dVg = -Vdseff * dPigcd_dVg - Pigcd * dVdseff_dVg;
dT7_dVd = -Vdseff * dPigcd_dVd - Pigcd * dVdseff_dVd + dT7_dVg * dVgsteff_dVd;
dT7_dVb = -Vdseff * dPigcd_dVb - Pigcd * dVdseff_dVb + dT7_dVg * dVgsteff_dVb;
dT7_dVg *= dVgsteff_dVg;
dT7_dVb *= dVbseff_dVb;
T8 = T7 * T7 + 2.0e-4;
dT8_dVg = 2.0 * T7;
dT8_dVd = dT8_dVg * dT7_dVd;
dT8_dVb = dT8_dVg * dT7_dVb;
dT8_dVg *= dT7_dVg;
if (T7 > EXP_THRESHOLD)
{ T9 = MAX_EXP;
dT9_dVg = dT9_dVd = dT9_dVb = 0.0;
}
else if (T7 < -EXP_THRESHOLD)
{ T9 = MIN_EXP;
dT9_dVg = dT9_dVd = dT9_dVb = 0.0;
}
else
{ T9 = exp(T7);
dT9_dVg = T9 * dT7_dVg;
dT9_dVd = T9 * dT7_dVd;
dT9_dVb = T9 * dT7_dVb;
}
T0 = T8 * T8;
T1 = T9 - 1.0 + 1.0e-4;
T10 = (T1 - T7) / T8;
dT10_dVg = (dT9_dVg - dT7_dVg - T10 * dT8_dVg) / T8;
dT10_dVd = (dT9_dVd - dT7_dVd - T10 * dT8_dVd) / T8;
dT10_dVb = (dT9_dVb - dT7_dVb - T10 * dT8_dVb) / T8;
Igcs = Igc * T10;
dIgcs_dVg = dIgc_dVg * T10 + Igc * dT10_dVg;
dIgcs_dVd = dIgc_dVd * T10 + Igc * dT10_dVd;
dIgcs_dVb = dIgc_dVb * T10 + Igc * dT10_dVb;
T1 = T9 - 1.0 - 1.0e-4;
T10 = (T7 * T9 - T1) / T8;
dT10_dVg = (dT7_dVg * T9 + (T7 - 1.0) * dT9_dVg
- T10 * dT8_dVg) / T8;
dT10_dVd = (dT7_dVd * T9 + (T7 - 1.0) * dT9_dVd
- T10 * dT8_dVd) / T8;
dT10_dVb = (dT7_dVb * T9 + (T7 - 1.0) * dT9_dVb
- T10 * dT8_dVb) / T8;
Igcd = Igc * T10;
dIgcd_dVg = dIgc_dVg * T10 + Igc * dT10_dVg;
dIgcd_dVd = dIgc_dVd * T10 + Igc * dT10_dVd;
dIgcd_dVb = dIgc_dVb * T10 + Igc * dT10_dVb;
here->BSIM4v7Igcs = Igcs;
here->BSIM4v7gIgcsg = dIgcs_dVg;
here->BSIM4v7gIgcsd = dIgcs_dVd;
here->BSIM4v7gIgcsb = dIgcs_dVb * dVbseff_dVb;
here->BSIM4v7Igcd = Igcd;
here->BSIM4v7gIgcdg = dIgcd_dVg;
here->BSIM4v7gIgcdd = dIgcd_dVd;
here->BSIM4v7gIgcdb = dIgcd_dVb * dVbseff_dVb;
T0 = vgs - (pParam->BSIM4v7vfbsd + pParam->BSIM4v7vfbsdoff);
vgs_eff = sqrt(T0 * T0 + 1.0e-4);
dvgs_eff_dvg = T0 / vgs_eff;
T2 = vgs * vgs_eff;
dT2_dVg = vgs * dvgs_eff_dvg + vgs_eff;
T11 = pParam->BSIM4v7AechvbEdgeS;
T12 = pParam->BSIM4v7BechvbEdge;
T3 = pParam->BSIM4v7aigs * pParam->BSIM4v7cigs
- pParam->BSIM4v7bigs;
T4 = pParam->BSIM4v7bigs * pParam->BSIM4v7cigs;
T5 = T12 * (pParam->BSIM4v7aigs + T3 * vgs_eff
- T4 * vgs_eff * vgs_eff);
if (T5 > EXP_THRESHOLD)
{ T6 = MAX_EXP;
dT6_dVg = 0.0;
}
else if (T5 < -EXP_THRESHOLD)
{ T6 = MIN_EXP;
dT6_dVg = 0.0;
}
else
{ T6 = exp(T5);
dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * vgs_eff)
* dvgs_eff_dvg;
}
Igs = T11 * T2 * T6;
dIgs_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg);
dIgs_dVs = -dIgs_dVg;
T0 = vgd - (pParam->BSIM4v7vfbsd + pParam->BSIM4v7vfbsdoff);
vgd_eff = sqrt(T0 * T0 + 1.0e-4);
dvgd_eff_dvg = T0 / vgd_eff;
T2 = vgd * vgd_eff;
dT2_dVg = vgd * dvgd_eff_dvg + vgd_eff;
T11 = pParam->BSIM4v7AechvbEdgeD;
T3 = pParam->BSIM4v7aigd * pParam->BSIM4v7cigd
- pParam->BSIM4v7bigd;
T4 = pParam->BSIM4v7bigd * pParam->BSIM4v7cigd;
T5 = T12 * (pParam->BSIM4v7aigd + T3 * vgd_eff
- T4 * vgd_eff * vgd_eff);
if (T5 > EXP_THRESHOLD)
{ T6 = MAX_EXP;
dT6_dVg = 0.0;
}
else if (T5 < -EXP_THRESHOLD)
{ T6 = MIN_EXP;
dT6_dVg = 0.0;
}
else
{ T6 = exp(T5);
dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * vgd_eff)
* dvgd_eff_dvg;
}
Igd = T11 * T2 * T6;
dIgd_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg);
dIgd_dVd = -dIgd_dVg;
here->BSIM4v7Igs = Igs;
here->BSIM4v7gIgsg = dIgs_dVg;
here->BSIM4v7gIgss = dIgs_dVs;
here->BSIM4v7Igd = Igd;
here->BSIM4v7gIgdg = dIgd_dVg;
here->BSIM4v7gIgdd = dIgd_dVd;
}
else
{ here->BSIM4v7Igcs = here->BSIM4v7gIgcsg = here->BSIM4v7gIgcsd
= here->BSIM4v7gIgcsb = 0.0;
here->BSIM4v7Igcd = here->BSIM4v7gIgcdg = here->BSIM4v7gIgcdd
= here->BSIM4v7gIgcdb = 0.0;
here->BSIM4v7Igs = here->BSIM4v7gIgsg = here->BSIM4v7gIgss = 0.0;
here->BSIM4v7Igd = here->BSIM4v7gIgdg = here->BSIM4v7gIgdd = 0.0;
}
if (model->BSIM4v7igbMod)
{ T0 = tmp * pParam->BSIM4v7nigbacc;
T1 = -Vgs_eff + Vbseff + Vfb;
VxNVt = T1 / T0;
if (VxNVt > EXP_THRESHOLD)
{ Vaux = T1;
dVaux_dVg = -dVgs_eff_dVg;
dVaux_dVb = 1.0;
}
else if (VxNVt < -EXP_THRESHOLD)
{ Vaux = T0 * log(1.0 + MIN_EXP);
dVaux_dVg = dVaux_dVb = 0.0;
}
else
{ ExpVxNVt = exp(VxNVt);
Vaux = T0 * log(1.0 + ExpVxNVt);
dVaux_dVb = ExpVxNVt / (1.0 + ExpVxNVt);
dVaux_dVg = -dVaux_dVb * dVgs_eff_dVg;
}
T2 = (Vgs_eff - Vbseff) * Vaux;
dT2_dVg = dVgs_eff_dVg * Vaux + (Vgs_eff - Vbseff) * dVaux_dVg;
dT2_dVb = -Vaux + (Vgs_eff - Vbseff) * dVaux_dVb;
T11 = 4.97232e-7 * pParam->BSIM4v7weff
* pParam->BSIM4v7leff * pParam->BSIM4v7ToxRatio;
T12 = -7.45669e11 * toxe;
T3 = pParam->BSIM4v7aigbacc * pParam->BSIM4v7cigbacc
- pParam->BSIM4v7bigbacc;
T4 = pParam->BSIM4v7bigbacc * pParam->BSIM4v7cigbacc;
T5 = T12 * (pParam->BSIM4v7aigbacc + T3 * Voxacc
- T4 * Voxacc * Voxacc);
if (T5 > EXP_THRESHOLD)
{ T6 = MAX_EXP;
dT6_dVg = dT6_dVb = 0.0;
}
else if (T5 < -EXP_THRESHOLD)
{ T6 = MIN_EXP;
dT6_dVg = dT6_dVb = 0.0;
}
else
{ T6 = exp(T5);
dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxacc);
dT6_dVb = dT6_dVg * dVoxacc_dVb;
dT6_dVg *= dVoxacc_dVg;
}
Igbacc = T11 * T2 * T6;
dIgbacc_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg);
dIgbacc_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb);
T0 = tmp * pParam->BSIM4v7nigbinv;
T1 = Voxdepinv - pParam->BSIM4v7eigbinv;
VxNVt = T1 / T0;
if (VxNVt > EXP_THRESHOLD)
{ Vaux = T1;
dVaux_dVg = dVoxdepinv_dVg;
dVaux_dVd = dVoxdepinv_dVd;
dVaux_dVb = dVoxdepinv_dVb;
}
else if (VxNVt < -EXP_THRESHOLD)
{ Vaux = T0 * log(1.0 + MIN_EXP);
dVaux_dVg = dVaux_dVd = dVaux_dVb = 0.0;
}
else
{ ExpVxNVt = exp(VxNVt);
Vaux = T0 * log(1.0 + ExpVxNVt);
dVaux_dVg = ExpVxNVt / (1.0 + ExpVxNVt);
dVaux_dVd = dVaux_dVg * dVoxdepinv_dVd;
dVaux_dVb = dVaux_dVg * dVoxdepinv_dVb;
dVaux_dVg *= dVoxdepinv_dVg;
}
T2 = (Vgs_eff - Vbseff) * Vaux;
dT2_dVg = dVgs_eff_dVg * Vaux + (Vgs_eff - Vbseff) * dVaux_dVg;
dT2_dVd = (Vgs_eff - Vbseff) * dVaux_dVd;
dT2_dVb = -Vaux + (Vgs_eff - Vbseff) * dVaux_dVb;
T11 *= 0.75610;
T12 *= 1.31724;
T3 = pParam->BSIM4v7aigbinv * pParam->BSIM4v7cigbinv
- pParam->BSIM4v7bigbinv;
T4 = pParam->BSIM4v7bigbinv * pParam->BSIM4v7cigbinv;
T5 = T12 * (pParam->BSIM4v7aigbinv + T3 * Voxdepinv
- T4 * Voxdepinv * Voxdepinv);
if (T5 > EXP_THRESHOLD)
{ T6 = MAX_EXP;
dT6_dVg = dT6_dVd = dT6_dVb = 0.0;
}
else if (T5 < -EXP_THRESHOLD)
{ T6 = MIN_EXP;
dT6_dVg = dT6_dVd = dT6_dVb = 0.0;
}
else
{ T6 = exp(T5);
dT6_dVg = T6 * T12 * (T3 - 2.0 * T4 * Voxdepinv);
dT6_dVd = dT6_dVg * dVoxdepinv_dVd;
dT6_dVb = dT6_dVg * dVoxdepinv_dVb;
dT6_dVg *= dVoxdepinv_dVg;
}
Igbinv = T11 * T2 * T6;
dIgbinv_dVg = T11 * (T2 * dT6_dVg + T6 * dT2_dVg);
dIgbinv_dVd = T11 * (T2 * dT6_dVd + T6 * dT2_dVd);
dIgbinv_dVb = T11 * (T2 * dT6_dVb + T6 * dT2_dVb);
here->BSIM4v7Igb = Igbinv + Igbacc;
here->BSIM4v7gIgbg = dIgbinv_dVg + dIgbacc_dVg;
here->BSIM4v7gIgbd = dIgbinv_dVd;
here->BSIM4v7gIgbb = (dIgbinv_dVb + dIgbacc_dVb) * dVbseff_dVb;
}
else
{ here->BSIM4v7Igb = here->BSIM4v7gIgbg = here->BSIM4v7gIgbd
= here->BSIM4v7gIgbs = here->BSIM4v7gIgbb = 0.0;
} /* End of Gate current */
if (here->BSIM4v7nf != 1.0)
{ cdrain *= here->BSIM4v7nf;
here->BSIM4v7gds *= here->BSIM4v7nf;
here->BSIM4v7gm *= here->BSIM4v7nf;
here->BSIM4v7gmbs *= here->BSIM4v7nf;
here->BSIM4v7IdovVds *= here->BSIM4v7nf;
here->BSIM4v7gbbs *= here->BSIM4v7nf;
here->BSIM4v7gbgs *= here->BSIM4v7nf;
here->BSIM4v7gbds *= here->BSIM4v7nf;
here->BSIM4v7csub *= here->BSIM4v7nf;
here->BSIM4v7Igidl *= here->BSIM4v7nf;
here->BSIM4v7ggidld *= here->BSIM4v7nf;
here->BSIM4v7ggidlg *= here->BSIM4v7nf;
here->BSIM4v7ggidlb *= here->BSIM4v7nf;
here->BSIM4v7Igisl *= here->BSIM4v7nf;
here->BSIM4v7ggisls *= here->BSIM4v7nf;
here->BSIM4v7ggislg *= here->BSIM4v7nf;
here->BSIM4v7ggislb *= here->BSIM4v7nf;
here->BSIM4v7Igcs *= here->BSIM4v7nf;
here->BSIM4v7gIgcsg *= here->BSIM4v7nf;
here->BSIM4v7gIgcsd *= here->BSIM4v7nf;
here->BSIM4v7gIgcsb *= here->BSIM4v7nf;
here->BSIM4v7Igcd *= here->BSIM4v7nf;
here->BSIM4v7gIgcdg *= here->BSIM4v7nf;
here->BSIM4v7gIgcdd *= here->BSIM4v7nf;
here->BSIM4v7gIgcdb *= here->BSIM4v7nf;
here->BSIM4v7Igs *= here->BSIM4v7nf;
here->BSIM4v7gIgsg *= here->BSIM4v7nf;
here->BSIM4v7gIgss *= here->BSIM4v7nf;
here->BSIM4v7Igd *= here->BSIM4v7nf;
here->BSIM4v7gIgdg *= here->BSIM4v7nf;
here->BSIM4v7gIgdd *= here->BSIM4v7nf;
here->BSIM4v7Igb *= here->BSIM4v7nf;
here->BSIM4v7gIgbg *= here->BSIM4v7nf;
here->BSIM4v7gIgbd *= here->BSIM4v7nf;
here->BSIM4v7gIgbb *= here->BSIM4v7nf;
}
here->BSIM4v7ggidls = -(here->BSIM4v7ggidld + here->BSIM4v7ggidlg
+ here->BSIM4v7ggidlb);
here->BSIM4v7ggisld = -(here->BSIM4v7ggisls + here->BSIM4v7ggislg
+ here->BSIM4v7ggislb);
here->BSIM4v7gIgbs = -(here->BSIM4v7gIgbg + here->BSIM4v7gIgbd
+ here->BSIM4v7gIgbb);
here->BSIM4v7gIgcss = -(here->BSIM4v7gIgcsg + here->BSIM4v7gIgcsd
+ here->BSIM4v7gIgcsb);
here->BSIM4v7gIgcds = -(here->BSIM4v7gIgcdg + here->BSIM4v7gIgcdd
+ here->BSIM4v7gIgcdb);
here->BSIM4v7cd = cdrain;
/* Calculations for noise analysis */
if (model->BSIM4v7tnoiMod == 0)
{ Abulk = Abulk0 * pParam->BSIM4v7abulkCVfactor;
Vdsat = Vgsteff / Abulk;
T0 = Vdsat - Vds - DELTA_4;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * Vdsat);
if (T0 >= 0.0)
Vdseff = Vdsat - 0.5 * (T0 + T1);
else
{ T3 = (DELTA_4 + DELTA_4) / (T1 - T0);
T4 = 1.0 - T3;
T5 = Vdsat * T3 / (T1 - T0);
Vdseff = Vdsat * T4;
}
if (Vds == 0.0)
Vdseff = 0.0;
T0 = Abulk * Vdseff;
T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1.0e-20);
T2 = Vdseff / T1;
T3 = T0 * T2;
here->BSIM4v7qinv = Coxeff * pParam->BSIM4v7weffCV * here->BSIM4v7nf
* pParam->BSIM4v7leffCV
* (Vgsteff - 0.5 * T0 + Abulk * T3);
}
else if(model->BSIM4v7tnoiMod == 2)
{
here->BSIM4v7noiGd0 = here->BSIM4v7nf * beta * Vgsteff / (1.0 + gche * Rds);
}
/*
* BSIM4v7 C-V begins
*/
if ((model->BSIM4v7xpart < 0) || (!ChargeComputationNeeded))
{ qgate = qdrn = qsrc = qbulk = 0.0;
here->BSIM4v7cggb = here->BSIM4v7cgsb = here->BSIM4v7cgdb = 0.0;
here->BSIM4v7cdgb = here->BSIM4v7cdsb = here->BSIM4v7cddb = 0.0;
here->BSIM4v7cbgb = here->BSIM4v7cbsb = here->BSIM4v7cbdb = 0.0;
here->BSIM4v7csgb = here->BSIM4v7cssb = here->BSIM4v7csdb = 0.0;
here->BSIM4v7cgbb = here->BSIM4v7csbb = here->BSIM4v7cdbb = here->BSIM4v7cbbb = 0.0;
here->BSIM4v7cqdb = here->BSIM4v7cqsb = here->BSIM4v7cqgb
= here->BSIM4v7cqbb = 0.0;
here->BSIM4v7gtau = 0.0;
goto finished;
}
else if (model->BSIM4v7capMod == 0)
{
if (Vbseff < 0.0)
{ VbseffCV = Vbs; /*4.6.2*/
dVbseffCV_dVb = 1.0;
}
else
{ VbseffCV = pParam->BSIM4v7phi - Phis;
dVbseffCV_dVb = -dPhis_dVb * dVbseff_dVb; /*4.6.2*/
}
Vfb = pParam->BSIM4v7vfbcv;
Vth = Vfb + pParam->BSIM4v7phi + pParam->BSIM4v7k1ox * sqrtPhis;
Vgst = Vgs_eff - Vth;
dVth_dVb = pParam->BSIM4v7k1ox * dsqrtPhis_dVb *dVbseff_dVb; /*4.6.2*/
dVgst_dVb = -dVth_dVb;
dVgst_dVg = dVgs_eff_dVg;
CoxWL = model->BSIM4v7coxe * pParam->BSIM4v7weffCV
* pParam->BSIM4v7leffCV * here->BSIM4v7nf;
Arg1 = Vgs_eff - VbseffCV - Vfb;
if (Arg1 <= 0.0)
{ qgate = CoxWL * Arg1;
qbulk = -qgate;
qdrn = 0.0;
here->BSIM4v7cggb = CoxWL * dVgs_eff_dVg;
here->BSIM4v7cgdb = 0.0;
here->BSIM4v7cgsb = CoxWL * (dVbseffCV_dVb - dVgs_eff_dVg);
here->BSIM4v7cdgb = 0.0;
here->BSIM4v7cddb = 0.0;
here->BSIM4v7cdsb = 0.0;
here->BSIM4v7cbgb = -CoxWL * dVgs_eff_dVg;
here->BSIM4v7cbdb = 0.0;
here->BSIM4v7cbsb = -here->BSIM4v7cgsb;
} /* Arg1 <= 0.0, end of accumulation */
else if (Vgst <= 0.0)
{ T1 = 0.5 * pParam->BSIM4v7k1ox;
T2 = sqrt(T1 * T1 + Arg1);
qgate = CoxWL * pParam->BSIM4v7k1ox * (T2 - T1);
qbulk = -qgate;
qdrn = 0.0;
T0 = CoxWL * T1 / T2;
here->BSIM4v7cggb = T0 * dVgs_eff_dVg;
here->BSIM4v7cgdb = 0.0;
here->BSIM4v7cgsb = T0 * (dVbseffCV_dVb - dVgs_eff_dVg);
here->BSIM4v7cdgb = 0.0;
here->BSIM4v7cddb = 0.0;
here->BSIM4v7cdsb = 0.0;
here->BSIM4v7cbgb = -here->BSIM4v7cggb;
here->BSIM4v7cbdb = 0.0;
here->BSIM4v7cbsb = -here->BSIM4v7cgsb;
} /* Vgst <= 0.0, end of depletion */
else
{ One_Third_CoxWL = CoxWL / 3.0;
Two_Third_CoxWL = 2.0 * One_Third_CoxWL;
AbulkCV = Abulk0 * pParam->BSIM4v7abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM4v7abulkCVfactor * dAbulk0_dVb*dVbseff_dVb;
dVdsat_dVg = 1.0 / AbulkCV; /*4.6.2*/
Vdsat = Vgst * dVdsat_dVg;
dVdsat_dVb = - (Vdsat * dAbulkCV_dVb + dVth_dVb)* dVdsat_dVg;
if (model->BSIM4v7xpart > 0.5)
{ /* 0/100 Charge partition model */
if (Vdsat <= Vds)
{ /* saturation region */
T1 = Vdsat / 3.0;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM4v7phi - T1);
T2 = -Two_Third_CoxWL * Vgst;
qbulk = -(qgate + T2);
qdrn = 0.0;
here->BSIM4v7cggb = One_Third_CoxWL * (3.0
- dVdsat_dVg) * dVgs_eff_dVg;
T2 = -One_Third_CoxWL * dVdsat_dVb;
here->BSIM4v7cgsb = -(here->BSIM4v7cggb + T2);
here->BSIM4v7cgdb = 0.0;
here->BSIM4v7cdgb = 0.0;
here->BSIM4v7cddb = 0.0;
here->BSIM4v7cdsb = 0.0;
here->BSIM4v7cbgb = -(here->BSIM4v7cggb
- Two_Third_CoxWL * dVgs_eff_dVg);
T3 = -(T2 + Two_Third_CoxWL * dVth_dVb);
here->BSIM4v7cbsb = -(here->BSIM4v7cbgb + T3);
here->BSIM4v7cbdb = 0.0;
}
else
{ /* linear region */
Alphaz = Vgst / Vdsat;
T1 = 2.0 * Vdsat - Vds;
T2 = Vds / (3.0 * T1);
T3 = T2 * Vds;
T9 = 0.25 * CoxWL;
T4 = T9 * Alphaz;
T7 = 2.0 * Vds - T1 - 3.0 * T3;
T8 = T3 - T1 - 2.0 * Vds;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM4v7phi - 0.5 * (Vds - T3));
T10 = T4 * T8;
qdrn = T4 * T7;
qbulk = -(qgate + qdrn + T10);
T5 = T3 / T1;
here->BSIM4v7cggb = CoxWL * (1.0 - T5 * dVdsat_dVg)
* dVgs_eff_dVg;
T11 = -CoxWL * T5 * dVdsat_dVb;
here->BSIM4v7cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5);
here->BSIM4v7cgsb = -(here->BSIM4v7cggb + T11
+ here->BSIM4v7cgdb);
T6 = 1.0 / Vdsat;
dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg);
dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb);
T7 = T9 * T7;
T8 = T9 * T8;
T9 = 2.0 * T4 * (1.0 - 3.0 * T5);
here->BSIM4v7cdgb = (T7 * dAlphaz_dVg - T9
* dVdsat_dVg) * dVgs_eff_dVg;
T12 = T7 * dAlphaz_dVb - T9 * dVdsat_dVb;
here->BSIM4v7cddb = T4 * (3.0 - 6.0 * T2 - 3.0 * T5);
here->BSIM4v7cdsb = -(here->BSIM4v7cdgb + T12
+ here->BSIM4v7cddb);
T9 = 2.0 * T4 * (1.0 + T5);
T10 = (T8 * dAlphaz_dVg - T9 * dVdsat_dVg)
* dVgs_eff_dVg;
T11 = T8 * dAlphaz_dVb - T9 * dVdsat_dVb;
T12 = T4 * (2.0 * T2 + T5 - 1.0);
T0 = -(T10 + T11 + T12);
here->BSIM4v7cbgb = -(here->BSIM4v7cggb
+ here->BSIM4v7cdgb + T10);
here->BSIM4v7cbdb = -(here->BSIM4v7cgdb
+ here->BSIM4v7cddb + T12);
here->BSIM4v7cbsb = -(here->BSIM4v7cgsb
+ here->BSIM4v7cdsb + T0);
}
}
else if (model->BSIM4v7xpart < 0.5)
{ /* 40/60 Charge partition model */
if (Vds >= Vdsat)
{ /* saturation region */
T1 = Vdsat / 3.0;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM4v7phi - T1);
T2 = -Two_Third_CoxWL * Vgst;
qbulk = -(qgate + T2);
qdrn = 0.4 * T2;
here->BSIM4v7cggb = One_Third_CoxWL * (3.0
- dVdsat_dVg) * dVgs_eff_dVg;
T2 = -One_Third_CoxWL * dVdsat_dVb;
here->BSIM4v7cgsb = -(here->BSIM4v7cggb + T2);
here->BSIM4v7cgdb = 0.0;
T3 = 0.4 * Two_Third_CoxWL;
here->BSIM4v7cdgb = -T3 * dVgs_eff_dVg;
here->BSIM4v7cddb = 0.0;
T4 = T3 * dVth_dVb;
here->BSIM4v7cdsb = -(T4 + here->BSIM4v7cdgb);
here->BSIM4v7cbgb = -(here->BSIM4v7cggb
- Two_Third_CoxWL * dVgs_eff_dVg);
T3 = -(T2 + Two_Third_CoxWL * dVth_dVb);
here->BSIM4v7cbsb = -(here->BSIM4v7cbgb + T3);
here->BSIM4v7cbdb = 0.0;
}
else
{ /* linear region */
Alphaz = Vgst / Vdsat;
T1 = 2.0 * Vdsat - Vds;
T2 = Vds / (3.0 * T1);
T3 = T2 * Vds;
T9 = 0.25 * CoxWL;
T4 = T9 * Alphaz;
qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4v7phi
- 0.5 * (Vds - T3));
T5 = T3 / T1;
here->BSIM4v7cggb = CoxWL * (1.0 - T5 * dVdsat_dVg)
* dVgs_eff_dVg;
tmp = -CoxWL * T5 * dVdsat_dVb;
here->BSIM4v7cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5);
here->BSIM4v7cgsb = -(here->BSIM4v7cggb
+ here->BSIM4v7cgdb + tmp);
T6 = 1.0 / Vdsat;
dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg);
dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb);
T6 = 8.0 * Vdsat * Vdsat - 6.0 * Vdsat * Vds
+ 1.2 * Vds * Vds;
T8 = T2 / T1;
T7 = Vds - T1 - T8 * T6;
qdrn = T4 * T7;
T7 *= T9;
tmp = T8 / T1;
tmp1 = T4 * (2.0 - 4.0 * tmp * T6
+ T8 * (16.0 * Vdsat - 6.0 * Vds));
here->BSIM4v7cdgb = (T7 * dAlphaz_dVg - tmp1
* dVdsat_dVg) * dVgs_eff_dVg;
T10 = T7 * dAlphaz_dVb - tmp1 * dVdsat_dVb;
here->BSIM4v7cddb = T4 * (2.0 - (1.0 / (3.0 * T1
* T1) + 2.0 * tmp) * T6 + T8
* (6.0 * Vdsat - 2.4 * Vds));
here->BSIM4v7cdsb = -(here->BSIM4v7cdgb
+ T10 + here->BSIM4v7cddb);
T7 = 2.0 * (T1 + T3);
qbulk = -(qgate - T4 * T7);
T7 *= T9;
T0 = 4.0 * T4 * (1.0 - T5);
T12 = (-T7 * dAlphaz_dVg - T0 * dVdsat_dVg) * dVgs_eff_dVg
- here->BSIM4v7cdgb; /*4.6.2*/
T11 = -T7 * dAlphaz_dVb - T10 - T0 * dVdsat_dVb;
T10 = -4.0 * T4 * (T2 - 0.5 + 0.5 * T5)
- here->BSIM4v7cddb;
tmp = -(T10 + T11 + T12);
here->BSIM4v7cbgb = -(here->BSIM4v7cggb
+ here->BSIM4v7cdgb + T12);
here->BSIM4v7cbdb = -(here->BSIM4v7cgdb
+ here->BSIM4v7cddb + T10);
here->BSIM4v7cbsb = -(here->BSIM4v7cgsb
+ here->BSIM4v7cdsb + tmp);
}
}
else
{ /* 50/50 partitioning */
if (Vds >= Vdsat)
{ /* saturation region */
T1 = Vdsat / 3.0;
qgate = CoxWL * (Vgs_eff - Vfb
- pParam->BSIM4v7phi - T1);
T2 = -Two_Third_CoxWL * Vgst;
qbulk = -(qgate + T2);
qdrn = 0.5 * T2;
here->BSIM4v7cggb = One_Third_CoxWL * (3.0
- dVdsat_dVg) * dVgs_eff_dVg;
T2 = -One_Third_CoxWL * dVdsat_dVb;
here->BSIM4v7cgsb = -(here->BSIM4v7cggb + T2);
here->BSIM4v7cgdb = 0.0;
here->BSIM4v7cdgb = -One_Third_CoxWL * dVgs_eff_dVg;
here->BSIM4v7cddb = 0.0;
T4 = One_Third_CoxWL * dVth_dVb;
here->BSIM4v7cdsb = -(T4 + here->BSIM4v7cdgb);
here->BSIM4v7cbgb = -(here->BSIM4v7cggb
- Two_Third_CoxWL * dVgs_eff_dVg);
T3 = -(T2 + Two_Third_CoxWL * dVth_dVb);
here->BSIM4v7cbsb = -(here->BSIM4v7cbgb + T3);
here->BSIM4v7cbdb = 0.0;
}
else
{ /* linear region */
Alphaz = Vgst / Vdsat;
T1 = 2.0 * Vdsat - Vds;
T2 = Vds / (3.0 * T1);
T3 = T2 * Vds;
T9 = 0.25 * CoxWL;
T4 = T9 * Alphaz;
qgate = CoxWL * (Vgs_eff - Vfb - pParam->BSIM4v7phi
- 0.5 * (Vds - T3));
T5 = T3 / T1;
here->BSIM4v7cggb = CoxWL * (1.0 - T5 * dVdsat_dVg)
* dVgs_eff_dVg;
tmp = -CoxWL * T5 * dVdsat_dVb;
here->BSIM4v7cgdb = CoxWL * (T2 - 0.5 + 0.5 * T5);
here->BSIM4v7cgsb = -(here->BSIM4v7cggb
+ here->BSIM4v7cgdb + tmp);
T6 = 1.0 / Vdsat;
dAlphaz_dVg = T6 * (1.0 - Alphaz * dVdsat_dVg);
dAlphaz_dVb = -T6 * (dVth_dVb + Alphaz * dVdsat_dVb);
T7 = T1 + T3;
qdrn = -T4 * T7;
qbulk = - (qgate + qdrn + qdrn);
T7 *= T9;
T0 = T4 * (2.0 * T5 - 2.0);
here->BSIM4v7cdgb = (T0 * dVdsat_dVg - T7
* dAlphaz_dVg) * dVgs_eff_dVg;
T12 = T0 * dVdsat_dVb - T7 * dAlphaz_dVb;
here->BSIM4v7cddb = T4 * (1.0 - 2.0 * T2 - T5);
here->BSIM4v7cdsb = -(here->BSIM4v7cdgb + T12
+ here->BSIM4v7cddb);
here->BSIM4v7cbgb = -(here->BSIM4v7cggb
+ 2.0 * here->BSIM4v7cdgb);
here->BSIM4v7cbdb = -(here->BSIM4v7cgdb
+ 2.0 * here->BSIM4v7cddb);
here->BSIM4v7cbsb = -(here->BSIM4v7cgsb
+ 2.0 * here->BSIM4v7cdsb);
} /* end of linear region */
} /* end of 50/50 partition */
} /* end of inversion */
} /* end of capMod=0 */
else
{ if (Vbseff < 0.0)
{ VbseffCV = Vbseff;
dVbseffCV_dVb = 1.0;
}
else
{ VbseffCV = pParam->BSIM4v7phi - Phis;
dVbseffCV_dVb = -dPhis_dVb;
}
CoxWL = model->BSIM4v7coxe * pParam->BSIM4v7weffCV
* pParam->BSIM4v7leffCV * here->BSIM4v7nf;
if(model->BSIM4v7cvchargeMod == 0)
{
/* Seperate VgsteffCV with noff and voffcv */
noff = n * pParam->BSIM4v7noff;
dnoff_dVd = pParam->BSIM4v7noff * dn_dVd;
dnoff_dVb = pParam->BSIM4v7noff * dn_dVb;
T0 = Vtm * noff;
voffcv = pParam->BSIM4v7voffcv;
VgstNVt = (Vgst - voffcv) / T0;
if (VgstNVt > EXP_THRESHOLD)
{
Vgsteff = Vgst - voffcv;
dVgsteff_dVg = dVgs_eff_dVg;
dVgsteff_dVd = -dVth_dVd;
dVgsteff_dVb = -dVth_dVb;
}
else if (VgstNVt < -EXP_THRESHOLD)
{
Vgsteff = T0 * log(1.0 + MIN_EXP);
dVgsteff_dVg = 0.0;
dVgsteff_dVd = Vgsteff / noff;
dVgsteff_dVb = dVgsteff_dVd * dnoff_dVb;
dVgsteff_dVd *= dnoff_dVd;
}
else
{
ExpVgst = exp(VgstNVt);
Vgsteff = T0 * log(1.0 + ExpVgst);
dVgsteff_dVg = ExpVgst / (1.0 + ExpVgst);
dVgsteff_dVd = -dVgsteff_dVg * (dVth_dVd + (Vgst - voffcv)
/ noff * dnoff_dVd) + Vgsteff / noff * dnoff_dVd;
dVgsteff_dVb = -dVgsteff_dVg * (dVth_dVb + (Vgst - voffcv)
/ noff * dnoff_dVb) + Vgsteff / noff * dnoff_dVb;
dVgsteff_dVg *= dVgs_eff_dVg;
}
/* End of VgsteffCV for cvchargeMod = 0 */
}
else
{
T0 = n * Vtm;
T1 = pParam->BSIM4v7mstarcv * Vgst;
T2 = T1 / T0;
if (T2 > EXP_THRESHOLD)
{
T10 = T1;
dT10_dVg = pParam->BSIM4v7mstarcv * dVgs_eff_dVg;
dT10_dVd = -dVth_dVd * pParam->BSIM4v7mstarcv;
dT10_dVb = -dVth_dVb * pParam->BSIM4v7mstarcv;
}
else if (T2 < -EXP_THRESHOLD)
{
T10 = Vtm * log(1.0 + MIN_EXP);
dT10_dVg = 0.0;
dT10_dVd = T10 * dn_dVd;
dT10_dVb = T10 * dn_dVb;
T10 *= n;
}
else
{
ExpVgst = exp(T2);
T3 = Vtm * log(1.0 + ExpVgst);
T10 = n * T3;
dT10_dVg = pParam->BSIM4v7mstarcv * ExpVgst / (1.0 + ExpVgst);
dT10_dVb = T3 * dn_dVb - dT10_dVg * (dVth_dVb + Vgst * dn_dVb / n);
dT10_dVd = T3 * dn_dVd - dT10_dVg * (dVth_dVd + Vgst * dn_dVd / n);
dT10_dVg *= dVgs_eff_dVg;
}
T1 = pParam->BSIM4v7voffcbncv - (1.0 - pParam->BSIM4v7mstarcv) * Vgst;
T2 = T1 / T0;
if (T2 < -EXP_THRESHOLD)
{
T3 = model->BSIM4v7coxe * MIN_EXP / pParam->BSIM4v7cdep0;
T9 = pParam->BSIM4v7mstarcv + T3 * n;
dT9_dVg = 0.0;
dT9_dVd = dn_dVd * T3;
dT9_dVb = dn_dVb * T3;
}
else if (T2 > EXP_THRESHOLD)
{
T3 = model->BSIM4v7coxe * MAX_EXP / pParam->BSIM4v7cdep0;
T9 = pParam->BSIM4v7mstarcv + T3 * n;
dT9_dVg = 0.0;
dT9_dVd = dn_dVd * T3;
dT9_dVb = dn_dVb * T3;
}
else
{
ExpVgst = exp(T2);
T3 = model->BSIM4v7coxe / pParam->BSIM4v7cdep0;
T4 = T3 * ExpVgst;
T5 = T1 * T4 / T0;
T9 = pParam->BSIM4v7mstarcv + n * T4;
dT9_dVg = T3 * (pParam->BSIM4v7mstarcv - 1.0) * ExpVgst / Vtm;
dT9_dVb = T4 * dn_dVb - dT9_dVg * dVth_dVb - T5 * dn_dVb;
dT9_dVd = T4 * dn_dVd - dT9_dVg * dVth_dVd - T5 * dn_dVd;
dT9_dVg *= dVgs_eff_dVg;
}
Vgsteff = T10 / T9;
T11 = T9 * T9;
dVgsteff_dVg = (T9 * dT10_dVg - T10 * dT9_dVg) / T11;
dVgsteff_dVd = (T9 * dT10_dVd - T10 * dT9_dVd) / T11;
dVgsteff_dVb = (T9 * dT10_dVb - T10 * dT9_dVb) / T11;
/* End of VgsteffCV for cvchargeMod = 1 */
}
if (model->BSIM4v7capMod == 1)
{ Vfb = here->BSIM4v7vfbzb;
V3 = Vfb - Vgs_eff + VbseffCV - DELTA_3;
if (Vfb <= 0.0)
T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * Vfb);
else
T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * Vfb);
T1 = 0.5 * (1.0 + V3 / T0);
Vfbeff = Vfb - 0.5 * (V3 + T0);
dVfbeff_dVg = T1 * dVgs_eff_dVg;
dVfbeff_dVb = -T1 * dVbseffCV_dVb;
Qac0 = CoxWL * (Vfbeff - Vfb);
dQac0_dVg = CoxWL * dVfbeff_dVg;
dQac0_dVb = CoxWL * dVfbeff_dVb;
T0 = 0.5 * pParam->BSIM4v7k1ox;
T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff;
if (pParam->BSIM4v7k1ox == 0.0)
{ T1 = 0.0;
T2 = 0.0;
}
else if (T3 < 0.0)
{ T1 = T0 + T3 / pParam->BSIM4v7k1ox;
T2 = CoxWL;
}
else
{ T1 = sqrt(T0 * T0 + T3);
T2 = CoxWL * T0 / T1;
}
Qsub0 = CoxWL * pParam->BSIM4v7k1ox * (T1 - T0);
dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg);
dQsub0_dVd = -T2 * dVgsteff_dVd;
dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb
+ dVgsteff_dVb);
AbulkCV = Abulk0 * pParam->BSIM4v7abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM4v7abulkCVfactor * dAbulk0_dVb;
VdsatCV = Vgsteff / AbulkCV;
T0 = VdsatCV - Vds - DELTA_4;
dT0_dVg = 1.0 / AbulkCV;
dT0_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * VdsatCV);
dT1_dVg = (T0 + DELTA_4 + DELTA_4) / T1;
dT1_dVd = -T0 / T1;
dT1_dVb = dT1_dVg * dT0_dVb;
dT1_dVg *= dT0_dVg;
if (T0 >= 0.0)
{ VdseffCV = VdsatCV - 0.5 * (T0 + T1);
dVdseffCV_dVg = 0.5 * (dT0_dVg - dT1_dVg);
dVdseffCV_dVd = 0.5 * (1.0 - dT1_dVd);
dVdseffCV_dVb = 0.5 * (dT0_dVb - dT1_dVb);
}
else
{ T3 = (DELTA_4 + DELTA_4) / (T1 - T0);
T4 = 1.0 - T3;
T5 = VdsatCV * T3 / (T1 - T0);
VdseffCV = VdsatCV * T4;
dVdseffCV_dVg = dT0_dVg * T4 + T5 * (dT1_dVg - dT0_dVg);
dVdseffCV_dVd = T5 * (dT1_dVd + 1.0);
dVdseffCV_dVb = dT0_dVb * (T4 - T5) + T5 * dT1_dVb;
}
if (Vds == 0.0)
{ VdseffCV = 0.0;
dVdseffCV_dVg = 0.0;
dVdseffCV_dVb = 0.0;
}
T0 = AbulkCV * VdseffCV;
T1 = 12.0 * (Vgsteff - 0.5 * T0 + 1.0e-20);
T2 = VdseffCV / T1;
T3 = T0 * T2;
T4 = (1.0 - 12.0 * T2 * T2 * AbulkCV);
T5 = (6.0 * T0 * (4.0 * Vgsteff - T0) / (T1 * T1) - 0.5);
T6 = 12.0 * T2 * T2 * Vgsteff;
qgate = CoxWL * (Vgsteff - 0.5 * VdseffCV + T3);
Cgg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg);
Cgd1 = CoxWL * T5 * dVdseffCV_dVd + Cgg1 * dVgsteff_dVd;
Cgb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Cgg1 * dVgsteff_dVb;
Cgg1 *= dVgsteff_dVg;
T7 = 1.0 - AbulkCV;
qbulk = CoxWL * T7 * (0.5 * VdseffCV - T3);
T4 = -T7 * (T4 - 1.0);
T5 = -T7 * T5;
T6 = -(T7 * T6 + (0.5 * VdseffCV - T3));
Cbg1 = CoxWL * (T4 + T5 * dVdseffCV_dVg);
Cbd1 = CoxWL * T5 * dVdseffCV_dVd + Cbg1 * dVgsteff_dVd;
Cbb1 = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Cbg1 * dVgsteff_dVb;
Cbg1 *= dVgsteff_dVg;
if (model->BSIM4v7xpart > 0.5)
{ /* 0/100 Charge partition model */
T1 = T1 + T1;
qsrc = -CoxWL * (0.5 * Vgsteff + 0.25 * T0
- T0 * T0 / T1);
T7 = (4.0 * Vgsteff - T0) / (T1 * T1);
T4 = -(0.5 + 24.0 * T0 * T0 / (T1 * T1));
T5 = -(0.25 * AbulkCV - 12.0 * AbulkCV * T0 * T7);
T6 = -(0.25 * VdseffCV - 12.0 * T0 * VdseffCV * T7);
Csg = CoxWL * (T4 + T5 * dVdseffCV_dVg);
Csd = CoxWL * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd;
Csb = CoxWL * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Csg * dVgsteff_dVb;
Csg *= dVgsteff_dVg;
}
else if (model->BSIM4v7xpart < 0.5)
{ /* 40/60 Charge partition model */
T1 = T1 / 12.0;
T2 = 0.5 * CoxWL / (T1 * T1);
T3 = Vgsteff * (2.0 * T0 * T0 / 3.0 + Vgsteff
* (Vgsteff - 4.0 * T0 / 3.0))
- 2.0 * T0 * T0 * T0 / 15.0;
qsrc = -T2 * T3;
T7 = 4.0 / 3.0 * Vgsteff * (Vgsteff - T0)
+ 0.4 * T0 * T0;
T4 = -2.0 * qsrc / T1 - T2 * (Vgsteff * (3.0
* Vgsteff - 8.0 * T0 / 3.0)
+ 2.0 * T0 * T0 / 3.0);
T5 = (qsrc / T1 + T2 * T7) * AbulkCV;
T6 = (qsrc / T1 * VdseffCV + T2 * T7 * VdseffCV);
Csg = (T4 + T5 * dVdseffCV_dVg);
Csd = T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd;
Csb = (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Csg * dVgsteff_dVb;
Csg *= dVgsteff_dVg;
}
else
{ /* 50/50 Charge partition model */
qsrc = -0.5 * (qgate + qbulk);
Csg = -0.5 * (Cgg1 + Cbg1);
Csb = -0.5 * (Cgb1 + Cbb1);
Csd = -0.5 * (Cgd1 + Cbd1);
}
qgate += Qac0 + Qsub0;
qbulk -= (Qac0 + Qsub0);
qdrn = -(qgate + qbulk + qsrc);
Cgg = dQac0_dVg + dQsub0_dVg + Cgg1;
Cgd = dQsub0_dVd + Cgd1;
Cgb = dQac0_dVb + dQsub0_dVb + Cgb1;
Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg;
Cbd = Cbd1 - dQsub0_dVd;
Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb;
Cgb *= dVbseff_dVb;
Cbb *= dVbseff_dVb;
Csb *= dVbseff_dVb;
here->BSIM4v7cggb = Cgg;
here->BSIM4v7cgsb = -(Cgg + Cgd + Cgb);
here->BSIM4v7cgdb = Cgd;
here->BSIM4v7cdgb = -(Cgg + Cbg + Csg);
here->BSIM4v7cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb
+ Csg + Csd + Csb);
here->BSIM4v7cddb = -(Cgd + Cbd + Csd);
here->BSIM4v7cbgb = Cbg;
here->BSIM4v7cbsb = -(Cbg + Cbd + Cbb);
here->BSIM4v7cbdb = Cbd;
}
/* Charge-Thickness capMod (CTM) begins */
else if (model->BSIM4v7capMod == 2)
{ V3 = here->BSIM4v7vfbzb - Vgs_eff + VbseffCV - DELTA_3;
if (here->BSIM4v7vfbzb <= 0.0)
T0 = sqrt(V3 * V3 - 4.0 * DELTA_3 * here->BSIM4v7vfbzb);
else
T0 = sqrt(V3 * V3 + 4.0 * DELTA_3 * here->BSIM4v7vfbzb);
T1 = 0.5 * (1.0 + V3 / T0);
Vfbeff = here->BSIM4v7vfbzb - 0.5 * (V3 + T0);
dVfbeff_dVg = T1 * dVgs_eff_dVg;
dVfbeff_dVb = -T1 * dVbseffCV_dVb;
Cox = model->BSIM4v7coxp;
Tox = 1.0e8 * model->BSIM4v7toxp;
T0 = (Vgs_eff - VbseffCV - here->BSIM4v7vfbzb) / Tox;
dT0_dVg = dVgs_eff_dVg / Tox;
dT0_dVb = -dVbseffCV_dVb / Tox;
tmp = T0 * pParam->BSIM4v7acde;
if ((-EXP_THRESHOLD < tmp) && (tmp < EXP_THRESHOLD))
{ Tcen = pParam->BSIM4v7ldeb * exp(tmp);
dTcen_dVg = pParam->BSIM4v7acde * Tcen;
dTcen_dVb = dTcen_dVg * dT0_dVb;
dTcen_dVg *= dT0_dVg;
}
else if (tmp <= -EXP_THRESHOLD)
{ Tcen = pParam->BSIM4v7ldeb * MIN_EXP;
dTcen_dVg = dTcen_dVb = 0.0;
}
else
{ Tcen = pParam->BSIM4v7ldeb * MAX_EXP;
dTcen_dVg = dTcen_dVb = 0.0;
}
LINK = 1.0e-3 * model->BSIM4v7toxp;
V3 = pParam->BSIM4v7ldeb - Tcen - LINK;
V4 = sqrt(V3 * V3 + 4.0 * LINK * pParam->BSIM4v7ldeb);
Tcen = pParam->BSIM4v7ldeb - 0.5 * (V3 + V4);
T1 = 0.5 * (1.0 + V3 / V4);
dTcen_dVg *= T1;
dTcen_dVb *= T1;
Ccen = epssub / Tcen;
T2 = Cox / (Cox + Ccen);
Coxeff = T2 * Ccen;
T3 = -Ccen / Tcen;
dCoxeff_dVg = T2 * T2 * T3;
dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb;
dCoxeff_dVg *= dTcen_dVg;
CoxWLcen = CoxWL * Coxeff / model->BSIM4v7coxe;
Qac0 = CoxWLcen * (Vfbeff - here->BSIM4v7vfbzb);
QovCox = Qac0 / Coxeff;
dQac0_dVg = CoxWLcen * dVfbeff_dVg
+ QovCox * dCoxeff_dVg;
dQac0_dVb = CoxWLcen * dVfbeff_dVb
+ QovCox * dCoxeff_dVb;
T0 = 0.5 * pParam->BSIM4v7k1ox;
T3 = Vgs_eff - Vfbeff - VbseffCV - Vgsteff;
if (pParam->BSIM4v7k1ox == 0.0)
{ T1 = 0.0;
T2 = 0.0;
}
else if (T3 < 0.0)
{ T1 = T0 + T3 / pParam->BSIM4v7k1ox;
T2 = CoxWLcen;
}
else
{ T1 = sqrt(T0 * T0 + T3);
T2 = CoxWLcen * T0 / T1;
}
Qsub0 = CoxWLcen * pParam->BSIM4v7k1ox * (T1 - T0);
QovCox = Qsub0 / Coxeff;
dQsub0_dVg = T2 * (dVgs_eff_dVg - dVfbeff_dVg - dVgsteff_dVg)
+ QovCox * dCoxeff_dVg;
dQsub0_dVd = -T2 * dVgsteff_dVd;
dQsub0_dVb = -T2 * (dVfbeff_dVb + dVbseffCV_dVb + dVgsteff_dVb)
+ QovCox * dCoxeff_dVb;
/* Gate-bias dependent delta Phis begins */
if (pParam->BSIM4v7k1ox <= 0.0)
{ Denomi = 0.25 * pParam->BSIM4v7moin * Vtm;
T0 = 0.5 * pParam->BSIM4v7sqrtPhi;
}
else
{ Denomi = pParam->BSIM4v7moin * Vtm
* pParam->BSIM4v7k1ox * pParam->BSIM4v7k1ox;
T0 = pParam->BSIM4v7k1ox * pParam->BSIM4v7sqrtPhi;
}
T1 = 2.0 * T0 + Vgsteff;
DeltaPhi = Vtm * log(1.0 + T1 * Vgsteff / Denomi);
dDeltaPhi_dVg = 2.0 * Vtm * (T1 -T0) / (Denomi + T1 * Vgsteff);
/* End of delta Phis */
/* VgDP = Vgsteff - DeltaPhi */
T0 = Vgsteff - DeltaPhi - 0.001;
dT0_dVg = 1.0 - dDeltaPhi_dVg;
T1 = sqrt(T0 * T0 + Vgsteff * 0.004);
VgDP = 0.5 * (T0 + T1);
dVgDP_dVg = 0.5 * (dT0_dVg + (T0 * dT0_dVg + 0.002) / T1);
Tox += Tox; /* WDLiu: Tcen reevaluated below due to different Vgsteff */
T0 = (Vgsteff + here->BSIM4v7vtfbphi2) / Tox;
tmp = exp(model->BSIM4v7bdos * 0.7 * log(T0));
T1 = 1.0 + tmp;
T2 = model->BSIM4v7bdos * 0.7 * tmp / (T0 * Tox);
Tcen = model->BSIM4v7ados * 1.9e-9 / T1;
dTcen_dVg = -Tcen * T2 / T1;
dTcen_dVd = dTcen_dVg * dVgsteff_dVd;
dTcen_dVb = dTcen_dVg * dVgsteff_dVb;
dTcen_dVg *= dVgsteff_dVg;
Ccen = epssub / Tcen;
T0 = Cox / (Cox + Ccen);
Coxeff = T0 * Ccen;
T1 = -Ccen / Tcen;
dCoxeff_dVg = T0 * T0 * T1;
dCoxeff_dVd = dCoxeff_dVg * dTcen_dVd;
dCoxeff_dVb = dCoxeff_dVg * dTcen_dVb;
dCoxeff_dVg *= dTcen_dVg;
CoxWLcen = CoxWL * Coxeff / model->BSIM4v7coxe;
AbulkCV = Abulk0 * pParam->BSIM4v7abulkCVfactor;
dAbulkCV_dVb = pParam->BSIM4v7abulkCVfactor * dAbulk0_dVb;
VdsatCV = VgDP / AbulkCV;
T0 = VdsatCV - Vds - DELTA_4;
dT0_dVg = dVgDP_dVg / AbulkCV;
dT0_dVb = -VdsatCV * dAbulkCV_dVb / AbulkCV;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_4 * VdsatCV);
dT1_dVg = (T0 + DELTA_4 + DELTA_4) / T1;
dT1_dVd = -T0 / T1;
dT1_dVb = dT1_dVg * dT0_dVb;
dT1_dVg *= dT0_dVg;
if (T0 >= 0.0)
{ VdseffCV = VdsatCV - 0.5 * (T0 + T1);
dVdseffCV_dVg = 0.5 * (dT0_dVg - dT1_dVg);
dVdseffCV_dVd = 0.5 * (1.0 - dT1_dVd);
dVdseffCV_dVb = 0.5 * (dT0_dVb - dT1_dVb);
}
else
{ T3 = (DELTA_4 + DELTA_4) / (T1 - T0);
T4 = 1.0 - T3;
T5 = VdsatCV * T3 / (T1 - T0);
VdseffCV = VdsatCV * T4;
dVdseffCV_dVg = dT0_dVg * T4 + T5 * (dT1_dVg - dT0_dVg);
dVdseffCV_dVd = T5 * (dT1_dVd + 1.0);
dVdseffCV_dVb = dT0_dVb * (T4 - T5) + T5 * dT1_dVb;
}
if (Vds == 0.0)
{ VdseffCV = 0.0;
dVdseffCV_dVg = 0.0;
dVdseffCV_dVb = 0.0;
}
T0 = AbulkCV * VdseffCV;
T1 = VgDP;
T2 = 12.0 * (T1 - 0.5 * T0 + 1.0e-20);
T3 = T0 / T2;
T4 = 1.0 - 12.0 * T3 * T3;
T5 = AbulkCV * (6.0 * T0 * (4.0 * T1 - T0) / (T2 * T2) - 0.5);
T6 = T5 * VdseffCV / AbulkCV;
qgate = CoxWLcen * (T1 - T0 * (0.5 - T3));
QovCox = qgate / Coxeff;
Cgg1 = CoxWLcen * (T4 * dVgDP_dVg
+ T5 * dVdseffCV_dVg);
Cgd1 = CoxWLcen * T5 * dVdseffCV_dVd + Cgg1
* dVgsteff_dVd + QovCox * dCoxeff_dVd;
Cgb1 = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Cgg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb;
Cgg1 = Cgg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg;
T7 = 1.0 - AbulkCV;
T8 = T2 * T2;
T9 = 12.0 * T7 * T0 * T0 / (T8 * AbulkCV);
T10 = T9 * dVgDP_dVg;
T11 = -T7 * T5 / AbulkCV;
T12 = -(T9 * T1 / AbulkCV + VdseffCV * (0.5 - T0 / T2));
qbulk = CoxWLcen * T7 * (0.5 * VdseffCV - T0 * VdseffCV / T2);
QovCox = qbulk / Coxeff;
Cbg1 = CoxWLcen * (T10 + T11 * dVdseffCV_dVg);
Cbd1 = CoxWLcen * T11 * dVdseffCV_dVd + Cbg1
* dVgsteff_dVd + QovCox * dCoxeff_dVd;
Cbb1 = CoxWLcen * (T11 * dVdseffCV_dVb + T12 * dAbulkCV_dVb)
+ Cbg1 * dVgsteff_dVb + QovCox * dCoxeff_dVb;
Cbg1 = Cbg1 * dVgsteff_dVg + QovCox * dCoxeff_dVg;
if (model->BSIM4v7xpart > 0.5)
{ /* 0/100 partition */
qsrc = -CoxWLcen * (T1 / 2.0 + T0 / 4.0
- 0.5 * T0 * T0 / T2);
QovCox = qsrc / Coxeff;
T2 += T2;
T3 = T2 * T2;
T7 = -(0.25 - 12.0 * T0 * (4.0 * T1 - T0) / T3);
T4 = -(0.5 + 24.0 * T0 * T0 / T3) * dVgDP_dVg;
T5 = T7 * AbulkCV;
T6 = T7 * VdseffCV;
Csg = CoxWLcen * (T4 + T5 * dVdseffCV_dVg);
Csd = CoxWLcen * T5 * dVdseffCV_dVd + Csg * dVgsteff_dVd
+ QovCox * dCoxeff_dVd;
Csb = CoxWLcen * (T5 * dVdseffCV_dVb + T6 * dAbulkCV_dVb)
+ Csg * dVgsteff_dVb + QovCox * dCoxeff_dVb;
Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg;
}
else if (model->BSIM4v7xpart < 0.5)
{ /* 40/60 partition */
T2 = T2 / 12.0;
T3 = 0.5 * CoxWLcen / (T2 * T2);
T4 = T1 * (2.0 * T0 * T0 / 3.0 + T1 * (T1 - 4.0
* T0 / 3.0)) - 2.0 * T0 * T0 * T0 / 15.0;
qsrc = -T3 * T4;
QovCox = qsrc / Coxeff;
T8 = 4.0 / 3.0 * T1 * (T1 - T0) + 0.4 * T0 * T0;
T5 = -2.0 * qsrc / T2 - T3 * (T1 * (3.0 * T1 - 8.0
* T0 / 3.0) + 2.0 * T0 * T0 / 3.0);
T6 = AbulkCV * (qsrc / T2 + T3 * T8);
T7 = T6 * VdseffCV / AbulkCV;
Csg = T5 * dVgDP_dVg + T6 * dVdseffCV_dVg;
Csd = Csg * dVgsteff_dVd + T6 * dVdseffCV_dVd
+ QovCox * dCoxeff_dVd;
Csb = Csg * dVgsteff_dVb + T6 * dVdseffCV_dVb
+ T7 * dAbulkCV_dVb + QovCox * dCoxeff_dVb;
Csg = Csg * dVgsteff_dVg + QovCox * dCoxeff_dVg;
}
else
{ /* 50/50 partition */
qsrc = -0.5 * qgate;
Csg = -0.5 * Cgg1;
Csd = -0.5 * Cgd1;
Csb = -0.5 * Cgb1;
}
qgate += Qac0 + Qsub0 - qbulk;
qbulk -= (Qac0 + Qsub0);
qdrn = -(qgate + qbulk + qsrc);
Cbg = Cbg1 - dQac0_dVg - dQsub0_dVg;
Cbd = Cbd1 - dQsub0_dVd;
Cbb = Cbb1 - dQac0_dVb - dQsub0_dVb;
Cgg = Cgg1 - Cbg;
Cgd = Cgd1 - Cbd;
Cgb = Cgb1 - Cbb;
Cgb *= dVbseff_dVb;
Cbb *= dVbseff_dVb;
Csb *= dVbseff_dVb;
here->BSIM4v7cggb = Cgg;
here->BSIM4v7cgsb = -(Cgg + Cgd + Cgb);
here->BSIM4v7cgdb = Cgd;
here->BSIM4v7cdgb = -(Cgg + Cbg + Csg);
here->BSIM4v7cdsb = (Cgg + Cgd + Cgb + Cbg + Cbd + Cbb
+ Csg + Csd + Csb);
here->BSIM4v7cddb = -(Cgd + Cbd + Csd);
here->BSIM4v7cbgb = Cbg;
here->BSIM4v7cbsb = -(Cbg + Cbd + Cbb);
here->BSIM4v7cbdb = Cbd;
} /* End of CTM */
}
here->BSIM4v7csgb = - here->BSIM4v7cggb - here->BSIM4v7cdgb - here->BSIM4v7cbgb;
here->BSIM4v7csdb = - here->BSIM4v7cgdb - here->BSIM4v7cddb - here->BSIM4v7cbdb;
here->BSIM4v7cssb = - here->BSIM4v7cgsb - here->BSIM4v7cdsb - here->BSIM4v7cbsb;
here->BSIM4v7cgbb = - here->BSIM4v7cgdb - here->BSIM4v7cggb - here->BSIM4v7cgsb;
here->BSIM4v7cdbb = - here->BSIM4v7cddb - here->BSIM4v7cdgb - here->BSIM4v7cdsb;
here->BSIM4v7cbbb = - here->BSIM4v7cbgb - here->BSIM4v7cbdb - here->BSIM4v7cbsb;
here->BSIM4v7csbb = - here->BSIM4v7cgbb - here->BSIM4v7cdbb - here->BSIM4v7cbbb;
here->BSIM4v7qgate = qgate;
here->BSIM4v7qbulk = qbulk;
here->BSIM4v7qdrn = qdrn;
here->BSIM4v7qsrc = -(qgate + qbulk + qdrn);
/* NQS begins */
if ((here->BSIM4v7trnqsMod) || (here->BSIM4v7acnqsMod))
{ here->BSIM4v7qchqs = qcheq = -(qbulk + qgate);
here->BSIM4v7cqgb = -(here->BSIM4v7cggb + here->BSIM4v7cbgb);
here->BSIM4v7cqdb = -(here->BSIM4v7cgdb + here->BSIM4v7cbdb);
here->BSIM4v7cqsb = -(here->BSIM4v7cgsb + here->BSIM4v7cbsb);
here->BSIM4v7cqbb = -(here->BSIM4v7cqgb + here->BSIM4v7cqdb
+ here->BSIM4v7cqsb);
CoxWL = model->BSIM4v7coxe * pParam->BSIM4v7weffCV * here->BSIM4v7nf
* pParam->BSIM4v7leffCV;
T1 = here->BSIM4v7gcrg / CoxWL; /* 1 / tau */
here->BSIM4v7gtau = T1 * ScalingFactor;
if (here->BSIM4v7acnqsMod)
here->BSIM4v7taunet = 1.0 / T1;
*(ckt->CKTstate0 + here->BSIM4v7qcheq) = qcheq;
if (ckt->CKTmode & MODEINITTRAN)
*(ckt->CKTstate1 + here->BSIM4v7qcheq) =
*(ckt->CKTstate0 + here->BSIM4v7qcheq);
if (here->BSIM4v7trnqsMod)
{ error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qcheq);
if (error)
return(error);
}
}
finished:
/* Calculate junction C-V */
if (ChargeComputationNeeded)
{ czbd = model->BSIM4v7DunitAreaTempJctCap * here->BSIM4v7Adeff; /* bug fix */
czbs = model->BSIM4v7SunitAreaTempJctCap * here->BSIM4v7Aseff;
czbdsw = model->BSIM4v7DunitLengthSidewallTempJctCap * here->BSIM4v7Pdeff;
czbdswg = model->BSIM4v7DunitLengthGateSidewallTempJctCap
* pParam->BSIM4v7weffCJ * here->BSIM4v7nf;
czbssw = model->BSIM4v7SunitLengthSidewallTempJctCap * here->BSIM4v7Pseff;
czbsswg = model->BSIM4v7SunitLengthGateSidewallTempJctCap
* pParam->BSIM4v7weffCJ * here->BSIM4v7nf;
MJS = model->BSIM4v7SbulkJctBotGradingCoeff;
MJSWS = model->BSIM4v7SbulkJctSideGradingCoeff;
MJSWGS = model->BSIM4v7SbulkJctGateSideGradingCoeff;
MJD = model->BSIM4v7DbulkJctBotGradingCoeff;
MJSWD = model->BSIM4v7DbulkJctSideGradingCoeff;
MJSWGD = model->BSIM4v7DbulkJctGateSideGradingCoeff;
/* Source Bulk Junction */
if (vbs_jct == 0.0)
{ *(ckt->CKTstate0 + here->BSIM4v7qbs) = 0.0;
here->BSIM4v7capbs = czbs + czbssw + czbsswg;
}
else if (vbs_jct < 0.0)
{ if (czbs > 0.0)
{ arg = 1.0 - vbs_jct / model->BSIM4v7PhiBS;
if (MJS == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJS * log(arg));
*(ckt->CKTstate0 + here->BSIM4v7qbs) = model->BSIM4v7PhiBS * czbs
* (1.0 - arg * sarg) / (1.0 - MJS);
here->BSIM4v7capbs = czbs * sarg;
}
else
{ *(ckt->CKTstate0 + here->BSIM4v7qbs) = 0.0;
here->BSIM4v7capbs = 0.0;
}
if (czbssw > 0.0)
{ arg = 1.0 - vbs_jct / model->BSIM4v7PhiBSWS;
if (MJSWS == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSWS * log(arg));
*(ckt->CKTstate0 + here->BSIM4v7qbs) += model->BSIM4v7PhiBSWS * czbssw
* (1.0 - arg * sarg) / (1.0 - MJSWS);
here->BSIM4v7capbs += czbssw * sarg;
}
if (czbsswg > 0.0)
{ arg = 1.0 - vbs_jct / model->BSIM4v7PhiBSWGS;
if (MJSWGS == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSWGS * log(arg));
*(ckt->CKTstate0 + here->BSIM4v7qbs) += model->BSIM4v7PhiBSWGS * czbsswg
* (1.0 - arg * sarg) / (1.0 - MJSWGS);
here->BSIM4v7capbs += czbsswg * sarg;
}
}
else
{ T0 = czbs + czbssw + czbsswg;
T1 = vbs_jct * (czbs * MJS / model->BSIM4v7PhiBS + czbssw * MJSWS
/ model->BSIM4v7PhiBSWS + czbsswg * MJSWGS / model->BSIM4v7PhiBSWGS);
*(ckt->CKTstate0 + here->BSIM4v7qbs) = vbs_jct * (T0 + 0.5 * T1);
here->BSIM4v7capbs = T0 + T1;
}
/* Drain Bulk Junction */
if (vbd_jct == 0.0)
{ *(ckt->CKTstate0 + here->BSIM4v7qbd) = 0.0;
here->BSIM4v7capbd = czbd + czbdsw + czbdswg;
}
else if (vbd_jct < 0.0)
{ if (czbd > 0.0)
{ arg = 1.0 - vbd_jct / model->BSIM4v7PhiBD;
if (MJD == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJD * log(arg));
*(ckt->CKTstate0 + here->BSIM4v7qbd) = model->BSIM4v7PhiBD* czbd
* (1.0 - arg * sarg) / (1.0 - MJD);
here->BSIM4v7capbd = czbd * sarg;
}
else
{ *(ckt->CKTstate0 + here->BSIM4v7qbd) = 0.0;
here->BSIM4v7capbd = 0.0;
}
if (czbdsw > 0.0)
{ arg = 1.0 - vbd_jct / model->BSIM4v7PhiBSWD;
if (MJSWD == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSWD * log(arg));
*(ckt->CKTstate0 + here->BSIM4v7qbd) += model->BSIM4v7PhiBSWD * czbdsw
* (1.0 - arg * sarg) / (1.0 - MJSWD);
here->BSIM4v7capbd += czbdsw * sarg;
}
if (czbdswg > 0.0)
{ arg = 1.0 - vbd_jct / model->BSIM4v7PhiBSWGD;
if (MJSWGD == 0.5)
sarg = 1.0 / sqrt(arg);
else
sarg = exp(-MJSWGD * log(arg));
*(ckt->CKTstate0 + here->BSIM4v7qbd) += model->BSIM4v7PhiBSWGD * czbdswg
* (1.0 - arg * sarg) / (1.0 - MJSWGD);
here->BSIM4v7capbd += czbdswg * sarg;
}
}
else
{ T0 = czbd + czbdsw + czbdswg;
T1 = vbd_jct * (czbd * MJD / model->BSIM4v7PhiBD + czbdsw * MJSWD
/ model->BSIM4v7PhiBSWD + czbdswg * MJSWGD / model->BSIM4v7PhiBSWGD);
*(ckt->CKTstate0 + here->BSIM4v7qbd) = vbd_jct * (T0 + 0.5 * T1);
here->BSIM4v7capbd = T0 + T1;
}
}
/*
* check convergence
*/
if ((here->BSIM4v7off == 0) || (!(ckt->CKTmode & MODEINITFIX)))
{ if (Check == 1)
{ ckt->CKTnoncon++;
#ifndef NEWCONV
}
else
{ if (here->BSIM4v7mode >= 0)
{ Idtot = here->BSIM4v7cd + here->BSIM4v7csub
+ here->BSIM4v7Igidl - here->BSIM4v7cbd;
}
else
{ Idtot = here->BSIM4v7cd + here->BSIM4v7cbd - here->BSIM4v7Igidl; /* bugfix */
}
tol0 = ckt->CKTreltol * MAX(fabs(cdhat), fabs(Idtot))
+ ckt->CKTabstol;
tol1 = ckt->CKTreltol * MAX(fabs(cseshat), fabs(Isestot))
+ ckt->CKTabstol;
tol2 = ckt->CKTreltol * MAX(fabs(cdedhat), fabs(Idedtot))
+ ckt->CKTabstol;
tol3 = ckt->CKTreltol * MAX(fabs(cgshat), fabs(Igstot))
+ ckt->CKTabstol;
tol4 = ckt->CKTreltol * MAX(fabs(cgdhat), fabs(Igdtot))
+ ckt->CKTabstol;
tol5 = ckt->CKTreltol * MAX(fabs(cgbhat), fabs(Igbtot))
+ ckt->CKTabstol;
if ((fabs(cdhat - Idtot) >= tol0) || (fabs(cseshat - Isestot) >= tol1)
|| (fabs(cdedhat - Idedtot) >= tol2))
{ ckt->CKTnoncon++;
}
else if ((fabs(cgshat - Igstot) >= tol3) || (fabs(cgdhat - Igdtot) >= tol4)
|| (fabs(cgbhat - Igbtot) >= tol5))
{ ckt->CKTnoncon++;
}
else
{ Ibtot = here->BSIM4v7cbs + here->BSIM4v7cbd
- here->BSIM4v7Igidl - here->BSIM4v7Igisl - here->BSIM4v7csub;
tol6 = ckt->CKTreltol * MAX(fabs(cbhat), fabs(Ibtot))
+ ckt->CKTabstol;
if (fabs(cbhat - Ibtot) > tol6)
{ ckt->CKTnoncon++;
}
}
#endif /* NEWCONV */
}
}
*(ckt->CKTstate0 + here->BSIM4v7vds) = vds;
*(ckt->CKTstate0 + here->BSIM4v7vgs) = vgs;
*(ckt->CKTstate0 + here->BSIM4v7vbs) = vbs;
*(ckt->CKTstate0 + here->BSIM4v7vbd) = vbd;
*(ckt->CKTstate0 + here->BSIM4v7vges) = vges;
*(ckt->CKTstate0 + here->BSIM4v7vgms) = vgms;
*(ckt->CKTstate0 + here->BSIM4v7vdbs) = vdbs;
*(ckt->CKTstate0 + here->BSIM4v7vdbd) = vdbd;
*(ckt->CKTstate0 + here->BSIM4v7vsbs) = vsbs;
*(ckt->CKTstate0 + here->BSIM4v7vses) = vses;
*(ckt->CKTstate0 + here->BSIM4v7vdes) = vdes;
*(ckt->CKTstate0 + here->BSIM4v7qdef) = qdef;
if (!ChargeComputationNeeded)
goto line850;
if (here->BSIM4v7rgateMod == 3)
{
vgdx = vgmd;
vgsx = vgms;
}
else /* For rgateMod == 0, 1 and 2 */
{
vgdx = vgd;
vgsx = vgs;
}
if (model->BSIM4v7capMod == 0)
{
cgdo = pParam->BSIM4v7cgdo;
qgdo = pParam->BSIM4v7cgdo * vgdx;
cgso = pParam->BSIM4v7cgso;
qgso = pParam->BSIM4v7cgso * vgsx;
}
else /* For both capMod == 1 and 2 */
{ T0 = vgdx + DELTA_1;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_1);
T2 = 0.5 * (T0 - T1);
T3 = pParam->BSIM4v7weffCV * pParam->BSIM4v7cgdl;
T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM4v7ckappad);
cgdo = pParam->BSIM4v7cgdo + T3 - T3 * (1.0 - 1.0 / T4)
* (0.5 - 0.5 * T0 / T1);
qgdo = (pParam->BSIM4v7cgdo + T3) * vgdx - T3 * (T2
+ 0.5 * pParam->BSIM4v7ckappad * (T4 - 1.0));
T0 = vgsx + DELTA_1;
T1 = sqrt(T0 * T0 + 4.0 * DELTA_1);
T2 = 0.5 * (T0 - T1);
T3 = pParam->BSIM4v7weffCV * pParam->BSIM4v7cgsl;
T4 = sqrt(1.0 - 4.0 * T2 / pParam->BSIM4v7ckappas);
cgso = pParam->BSIM4v7cgso + T3 - T3 * (1.0 - 1.0 / T4)
* (0.5 - 0.5 * T0 / T1);
qgso = (pParam->BSIM4v7cgso + T3) * vgsx - T3 * (T2
+ 0.5 * pParam->BSIM4v7ckappas * (T4 - 1.0));
}
if (here->BSIM4v7nf != 1.0)
{ cgdo *= here->BSIM4v7nf;
cgso *= here->BSIM4v7nf;
qgdo *= here->BSIM4v7nf;
qgso *= here->BSIM4v7nf;
}
here->BSIM4v7cgdo = cgdo;
here->BSIM4v7qgdo = qgdo;
here->BSIM4v7cgso = cgso;
here->BSIM4v7qgso = qgso;
#ifndef NOBYPASS
line755:
#endif
ag0 = ckt->CKTag[0];
if (here->BSIM4v7mode > 0)
{ if (here->BSIM4v7trnqsMod == 0)
{ qdrn -= qgdo;
if (here->BSIM4v7rgateMod == 3)
{ gcgmgmb = (cgdo + cgso + pParam->BSIM4v7cgbo) * ag0;
gcgmdb = -cgdo * ag0;
gcgmsb = -cgso * ag0;
gcgmbb = -pParam->BSIM4v7cgbo * ag0;
gcdgmb = gcgmdb;
gcsgmb = gcgmsb;
gcbgmb = gcgmbb;
gcggb = here->BSIM4v7cggb * ag0;
gcgdb = here->BSIM4v7cgdb * ag0;
gcgsb = here->BSIM4v7cgsb * ag0;
gcgbb = -(gcggb + gcgdb + gcgsb);
gcdgb = here->BSIM4v7cdgb * ag0;
gcsgb = -(here->BSIM4v7cggb + here->BSIM4v7cbgb
+ here->BSIM4v7cdgb) * ag0;
gcbgb = here->BSIM4v7cbgb * ag0;
qgmb = pParam->BSIM4v7cgbo * vgmb;
qgmid = qgdo + qgso + qgmb;
qbulk -= qgmb;
qsrc = -(qgate + qgmid + qbulk + qdrn);
}
else
{ gcggb = (here->BSIM4v7cggb + cgdo + cgso
+ pParam->BSIM4v7cgbo ) * ag0;
gcgdb = (here->BSIM4v7cgdb - cgdo) * ag0;
gcgsb = (here->BSIM4v7cgsb - cgso) * ag0;
gcgbb = -(gcggb + gcgdb + gcgsb);
gcdgb = (here->BSIM4v7cdgb - cgdo) * ag0;
gcsgb = -(here->BSIM4v7cggb + here->BSIM4v7cbgb
+ here->BSIM4v7cdgb + cgso) * ag0;
gcbgb = (here->BSIM4v7cbgb - pParam->BSIM4v7cgbo) * ag0;
gcdgmb = gcsgmb = gcbgmb = 0.0;
qgb = pParam->BSIM4v7cgbo * vgb;
qgate += qgdo + qgso + qgb;
qbulk -= qgb;
qsrc = -(qgate + qbulk + qdrn);
}
gcddb = (here->BSIM4v7cddb + here->BSIM4v7capbd + cgdo) * ag0;
gcdsb = here->BSIM4v7cdsb * ag0;
gcsdb = -(here->BSIM4v7cgdb + here->BSIM4v7cbdb
+ here->BSIM4v7cddb) * ag0;
gcssb = (here->BSIM4v7capbs + cgso - (here->BSIM4v7cgsb
+ here->BSIM4v7cbsb + here->BSIM4v7cdsb)) * ag0;
if (!here->BSIM4v7rbodyMod)
{ gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb);
gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb);
gcbdb = (here->BSIM4v7cbdb - here->BSIM4v7capbd) * ag0;
gcbsb = (here->BSIM4v7cbsb - here->BSIM4v7capbs) * ag0;
gcdbdb = 0.0; gcsbsb = 0.0;
}
else
{ gcdbb = -(here->BSIM4v7cddb + here->BSIM4v7cdgb
+ here->BSIM4v7cdsb) * ag0;
gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb)
+ here->BSIM4v7capbs * ag0;
gcbdb = here->BSIM4v7cbdb * ag0;
gcbsb = here->BSIM4v7cbsb * ag0;
gcdbdb = -here->BSIM4v7capbd * ag0;
gcsbsb = -here->BSIM4v7capbs * ag0;
}
gcbbb = -(gcbdb + gcbgb + gcbsb + gcbgmb);
ggtg = ggtd = ggtb = ggts = 0.0;
sxpart = 0.6;
dxpart = 0.4;
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0;
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0;
}
else
{ qcheq = here->BSIM4v7qchqs;
CoxWL = model->BSIM4v7coxe * pParam->BSIM4v7weffCV * here->BSIM4v7nf
* pParam->BSIM4v7leffCV;
T0 = qdef * ScalingFactor / CoxWL;
ggtg = here->BSIM4v7gtg = T0 * here->BSIM4v7gcrgg;
ggtd = here->BSIM4v7gtd = T0 * here->BSIM4v7gcrgd;
ggts = here->BSIM4v7gts = T0 * here->BSIM4v7gcrgs;
ggtb = here->BSIM4v7gtb = T0 * here->BSIM4v7gcrgb;
gqdef = ScalingFactor * ag0;
gcqgb = here->BSIM4v7cqgb * ag0;
gcqdb = here->BSIM4v7cqdb * ag0;
gcqsb = here->BSIM4v7cqsb * ag0;
gcqbb = here->BSIM4v7cqbb * ag0;
if (fabs(qcheq) <= 1.0e-5 * CoxWL)
{ if (model->BSIM4v7xpart < 0.5)
{ dxpart = 0.4;
}
else if (model->BSIM4v7xpart > 0.5)
{ dxpart = 0.0;
}
else
{ dxpart = 0.5;
}
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb
= ddxpart_dVs = 0.0;
}
else
{ dxpart = qdrn / qcheq;
Cdd = here->BSIM4v7cddb;
Csd = -(here->BSIM4v7cgdb + here->BSIM4v7cddb
+ here->BSIM4v7cbdb);
ddxpart_dVd = (Cdd - dxpart * (Cdd + Csd)) / qcheq;
Cdg = here->BSIM4v7cdgb;
Csg = -(here->BSIM4v7cggb + here->BSIM4v7cdgb
+ here->BSIM4v7cbgb);
ddxpart_dVg = (Cdg - dxpart * (Cdg + Csg)) / qcheq;
Cds = here->BSIM4v7cdsb;
Css = -(here->BSIM4v7cgsb + here->BSIM4v7cdsb
+ here->BSIM4v7cbsb);
ddxpart_dVs = (Cds - dxpart * (Cds + Css)) / qcheq;
ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs);
}
sxpart = 1.0 - dxpart;
dsxpart_dVd = -ddxpart_dVd;
dsxpart_dVg = -ddxpart_dVg;
dsxpart_dVs = -ddxpart_dVs;
dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs);
if (here->BSIM4v7rgateMod == 3)
{ gcgmgmb = (cgdo + cgso + pParam->BSIM4v7cgbo) * ag0;
gcgmdb = -cgdo * ag0;
gcgmsb = -cgso * ag0;
gcgmbb = -pParam->BSIM4v7cgbo * ag0;
gcdgmb = gcgmdb;
gcsgmb = gcgmsb;
gcbgmb = gcgmbb;
gcdgb = gcsgb = gcbgb = 0.0;
gcggb = gcgdb = gcgsb = gcgbb = 0.0;
qgmb = pParam->BSIM4v7cgbo * vgmb;
qgmid = qgdo + qgso + qgmb;
qgate = 0.0;
qbulk = -qgmb;
qdrn = -qgdo;
qsrc = -(qgmid + qbulk + qdrn);
}
else
{ gcggb = (cgdo + cgso + pParam->BSIM4v7cgbo ) * ag0;
gcgdb = -cgdo * ag0;
gcgsb = -cgso * ag0;
gcgbb = -pParam->BSIM4v7cgbo * ag0;
gcdgb = gcgdb;
gcsgb = gcgsb;
gcbgb = gcgbb;
gcdgmb = gcsgmb = gcbgmb = 0.0;
qgb = pParam->BSIM4v7cgbo * vgb;
qgate = qgdo + qgso + qgb;
qbulk = -qgb;
qdrn = -qgdo;
qsrc = -(qgate + qbulk + qdrn);
}
gcddb = (here->BSIM4v7capbd + cgdo) * ag0;
gcdsb = gcsdb = 0.0;
gcssb = (here->BSIM4v7capbs + cgso) * ag0;
if (!here->BSIM4v7rbodyMod)
{ gcdbb = -(gcdgb + gcddb + gcdgmb);
gcsbb = -(gcsgb + gcssb + gcsgmb);
gcbdb = -here->BSIM4v7capbd * ag0;
gcbsb = -here->BSIM4v7capbs * ag0;
gcdbdb = 0.0; gcsbsb = 0.0;
}
else
{ gcdbb = gcsbb = gcbdb = gcbsb = 0.0;
gcdbdb = -here->BSIM4v7capbd * ag0;
gcsbsb = -here->BSIM4v7capbs * ag0;
}
gcbbb = -(gcbdb + gcbgb + gcbsb + gcbgmb);
}
}
else
{ if (here->BSIM4v7trnqsMod == 0)
{ qsrc = qdrn - qgso;
if (here->BSIM4v7rgateMod == 3)
{ gcgmgmb = (cgdo + cgso + pParam->BSIM4v7cgbo) * ag0;
gcgmdb = -cgdo * ag0;
gcgmsb = -cgso * ag0;
gcgmbb = -pParam->BSIM4v7cgbo * ag0;
gcdgmb = gcgmdb;
gcsgmb = gcgmsb;
gcbgmb = gcgmbb;
gcggb = here->BSIM4v7cggb * ag0;
gcgdb = here->BSIM4v7cgsb * ag0;
gcgsb = here->BSIM4v7cgdb * ag0;
gcgbb = -(gcggb + gcgdb + gcgsb);
gcdgb = -(here->BSIM4v7cggb + here->BSIM4v7cbgb
+ here->BSIM4v7cdgb) * ag0;
gcsgb = here->BSIM4v7cdgb * ag0;
gcbgb = here->BSIM4v7cbgb * ag0;
qgmb = pParam->BSIM4v7cgbo * vgmb;
qgmid = qgdo + qgso + qgmb;
qbulk -= qgmb;
qdrn = -(qgate + qgmid + qbulk + qsrc);
}
else
{ gcggb = (here->BSIM4v7cggb + cgdo + cgso
+ pParam->BSIM4v7cgbo ) * ag0;
gcgdb = (here->BSIM4v7cgsb - cgdo) * ag0;
gcgsb = (here->BSIM4v7cgdb - cgso) * ag0;
gcgbb = -(gcggb + gcgdb + gcgsb);
gcdgb = -(here->BSIM4v7cggb + here->BSIM4v7cbgb
+ here->BSIM4v7cdgb + cgdo) * ag0;
gcsgb = (here->BSIM4v7cdgb - cgso) * ag0;
gcbgb = (here->BSIM4v7cbgb - pParam->BSIM4v7cgbo) * ag0;
gcdgmb = gcsgmb = gcbgmb = 0.0;
qgb = pParam->BSIM4v7cgbo * vgb;
qgate += qgdo + qgso + qgb;
qbulk -= qgb;
qdrn = -(qgate + qbulk + qsrc);
}
gcddb = (here->BSIM4v7capbd + cgdo - (here->BSIM4v7cgsb
+ here->BSIM4v7cbsb + here->BSIM4v7cdsb)) * ag0;
gcdsb = -(here->BSIM4v7cgdb + here->BSIM4v7cbdb
+ here->BSIM4v7cddb) * ag0;
gcsdb = here->BSIM4v7cdsb * ag0;
gcssb = (here->BSIM4v7cddb + here->BSIM4v7capbs + cgso) * ag0;
if (!here->BSIM4v7rbodyMod)
{ gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb);
gcsbb = -(gcsgb + gcsdb + gcssb + gcsgmb);
gcbdb = (here->BSIM4v7cbsb - here->BSIM4v7capbd) * ag0;
gcbsb = (here->BSIM4v7cbdb - here->BSIM4v7capbs) * ag0;
gcdbdb = 0.0; gcsbsb = 0.0;
}
else
{ gcdbb = -(gcdgb + gcddb + gcdsb + gcdgmb)
+ here->BSIM4v7capbd * ag0;
gcsbb = -(here->BSIM4v7cddb + here->BSIM4v7cdgb
+ here->BSIM4v7cdsb) * ag0;
gcbdb = here->BSIM4v7cbsb * ag0;
gcbsb = here->BSIM4v7cbdb * ag0;
gcdbdb = -here->BSIM4v7capbd * ag0;
gcsbsb = -here->BSIM4v7capbs * ag0;
}
gcbbb = -(gcbgb + gcbdb + gcbsb + gcbgmb);
ggtg = ggtd = ggtb = ggts = 0.0;
sxpart = 0.4;
dxpart = 0.6;
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0;
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0;
}
else
{ qcheq = here->BSIM4v7qchqs;
CoxWL = model->BSIM4v7coxe * pParam->BSIM4v7weffCV * here->BSIM4v7nf
* pParam->BSIM4v7leffCV;
T0 = qdef * ScalingFactor / CoxWL;
ggtg = here->BSIM4v7gtg = T0 * here->BSIM4v7gcrgg;
ggts = here->BSIM4v7gts = T0 * here->BSIM4v7gcrgd;
ggtd = here->BSIM4v7gtd = T0 * here->BSIM4v7gcrgs;
ggtb = here->BSIM4v7gtb = T0 * here->BSIM4v7gcrgb;
gqdef = ScalingFactor * ag0;
gcqgb = here->BSIM4v7cqgb * ag0;
gcqdb = here->BSIM4v7cqsb * ag0;
gcqsb = here->BSIM4v7cqdb * ag0;
gcqbb = here->BSIM4v7cqbb * ag0;
if (fabs(qcheq) <= 1.0e-5 * CoxWL)
{ if (model->BSIM4v7xpart < 0.5)
{ sxpart = 0.4;
}
else if (model->BSIM4v7xpart > 0.5)
{ sxpart = 0.0;
}
else
{ sxpart = 0.5;
}
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb
= dsxpart_dVs = 0.0;
}
else
{ sxpart = qdrn / qcheq;
Css = here->BSIM4v7cddb;
Cds = -(here->BSIM4v7cgdb + here->BSIM4v7cddb
+ here->BSIM4v7cbdb);
dsxpart_dVs = (Css - sxpart * (Css + Cds)) / qcheq;
Csg = here->BSIM4v7cdgb;
Cdg = -(here->BSIM4v7cggb + here->BSIM4v7cdgb
+ here->BSIM4v7cbgb);
dsxpart_dVg = (Csg - sxpart * (Csg + Cdg)) / qcheq;
Csd = here->BSIM4v7cdsb;
Cdd = -(here->BSIM4v7cgsb + here->BSIM4v7cdsb
+ here->BSIM4v7cbsb);
dsxpart_dVd = (Csd - sxpart * (Csd + Cdd)) / qcheq;
dsxpart_dVb = -(dsxpart_dVd + dsxpart_dVg + dsxpart_dVs);
}
dxpart = 1.0 - sxpart;
ddxpart_dVd = -dsxpart_dVd;
ddxpart_dVg = -dsxpart_dVg;
ddxpart_dVs = -dsxpart_dVs;
ddxpart_dVb = -(ddxpart_dVd + ddxpart_dVg + ddxpart_dVs);
if (here->BSIM4v7rgateMod == 3)
{ gcgmgmb = (cgdo + cgso + pParam->BSIM4v7cgbo) * ag0;
gcgmdb = -cgdo * ag0;
gcgmsb = -cgso * ag0;
gcgmbb = -pParam->BSIM4v7cgbo * ag0;
gcdgmb = gcgmdb;
gcsgmb = gcgmsb;
gcbgmb = gcgmbb;
gcdgb = gcsgb = gcbgb = 0.0;
gcggb = gcgdb = gcgsb = gcgbb = 0.0;
qgmb = pParam->BSIM4v7cgbo * vgmb;
qgmid = qgdo + qgso + qgmb;
qgate = 0.0;
qbulk = -qgmb;
qdrn = -qgdo;
qsrc = -qgso;
}
else
{ gcggb = (cgdo + cgso + pParam->BSIM4v7cgbo ) * ag0;
gcgdb = -cgdo * ag0;
gcgsb = -cgso * ag0;
gcgbb = -pParam->BSIM4v7cgbo * ag0;
gcdgb = gcgdb;
gcsgb = gcgsb;
gcbgb = gcgbb;
gcdgmb = gcsgmb = gcbgmb = 0.0;
qgb = pParam->BSIM4v7cgbo * vgb;
qgate = qgdo + qgso + qgb;
qbulk = -qgb;
qdrn = -qgdo;
qsrc = -qgso;
}
gcddb = (here->BSIM4v7capbd + cgdo) * ag0;
gcdsb = gcsdb = 0.0;
gcssb = (here->BSIM4v7capbs + cgso) * ag0;
if (!here->BSIM4v7rbodyMod)
{ gcdbb = -(gcdgb + gcddb + gcdgmb);
gcsbb = -(gcsgb + gcssb + gcsgmb);
gcbdb = -here->BSIM4v7capbd * ag0;
gcbsb = -here->BSIM4v7capbs * ag0;
gcdbdb = 0.0; gcsbsb = 0.0;
}
else
{ gcdbb = gcsbb = gcbdb = gcbsb = 0.0;
gcdbdb = -here->BSIM4v7capbd * ag0;
gcsbsb = -here->BSIM4v7capbs * ag0;
}
gcbbb = -(gcbdb + gcbgb + gcbsb + gcbgmb);
}
}
if (here->BSIM4v7trnqsMod)
{ *(ckt->CKTstate0 + here->BSIM4v7qcdump) = qdef * ScalingFactor;
if (ckt->CKTmode & MODEINITTRAN)
*(ckt->CKTstate1 + here->BSIM4v7qcdump) =
*(ckt->CKTstate0 + here->BSIM4v7qcdump);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qcdump);
if (error)
return(error);
}
if (ByPass) goto line860;
*(ckt->CKTstate0 + here->BSIM4v7qg) = qgate;
*(ckt->CKTstate0 + here->BSIM4v7qd) = qdrn
- *(ckt->CKTstate0 + here->BSIM4v7qbd);
*(ckt->CKTstate0 + here->BSIM4v7qs) = qsrc
- *(ckt->CKTstate0 + here->BSIM4v7qbs);
if (here->BSIM4v7rgateMod == 3)
*(ckt->CKTstate0 + here->BSIM4v7qgmid) = qgmid;
if (!here->BSIM4v7rbodyMod)
{ *(ckt->CKTstate0 + here->BSIM4v7qb) = qbulk
+ *(ckt->CKTstate0 + here->BSIM4v7qbd)
+ *(ckt->CKTstate0 + here->BSIM4v7qbs);
}
else
*(ckt->CKTstate0 + here->BSIM4v7qb) = qbulk;
/* Store small signal parameters */
if (ckt->CKTmode & MODEINITSMSIG)
{ goto line1000;
}
if (!ChargeComputationNeeded)
goto line850;
if (ckt->CKTmode & MODEINITTRAN)
{ *(ckt->CKTstate1 + here->BSIM4v7qb) =
*(ckt->CKTstate0 + here->BSIM4v7qb);
*(ckt->CKTstate1 + here->BSIM4v7qg) =
*(ckt->CKTstate0 + here->BSIM4v7qg);
*(ckt->CKTstate1 + here->BSIM4v7qd) =
*(ckt->CKTstate0 + here->BSIM4v7qd);
if (here->BSIM4v7rgateMod == 3)
*(ckt->CKTstate1 + here->BSIM4v7qgmid) =
*(ckt->CKTstate0 + here->BSIM4v7qgmid);
if (here->BSIM4v7rbodyMod)
{ *(ckt->CKTstate1 + here->BSIM4v7qbs) =
*(ckt->CKTstate0 + here->BSIM4v7qbs);
*(ckt->CKTstate1 + here->BSIM4v7qbd) =
*(ckt->CKTstate0 + here->BSIM4v7qbd);
}
}
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qb);
if (error)
return(error);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qg);
if (error)
return(error);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qd);
if (error)
return(error);
if (here->BSIM4v7rgateMod == 3)
{ error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qgmid);
if (error) return(error);
}
if (here->BSIM4v7rbodyMod)
{ error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qbs);
if (error)
return(error);
error = NIintegrate(ckt, &geq, &ceq, 0.0, here->BSIM4v7qbd);
if (error)
return(error);
}
goto line860;
line850:
/* Zero gcap and ceqcap if (!ChargeComputationNeeded) */
ceqqg = ceqqb = ceqqd = 0.0;
ceqqjd = ceqqjs = 0.0;
cqcheq = cqdef = 0.0;
gcdgb = gcddb = gcdsb = gcdbb = 0.0;
gcsgb = gcsdb = gcssb = gcsbb = 0.0;
gcggb = gcgdb = gcgsb = gcgbb = 0.0;
gcbdb = gcbgb = gcbsb = gcbbb = 0.0;
gcgmgmb = gcgmdb = gcgmsb = gcgmbb = 0.0;
gcdgmb = gcsgmb = gcbgmb = ceqqgmid = 0.0;
gcdbdb = gcsbsb = 0.0;
gqdef = gcqgb = gcqdb = gcqsb = gcqbb = 0.0;
ggtg = ggtd = ggtb = ggts = 0.0;
sxpart = (1.0 - (dxpart = (here->BSIM4v7mode > 0) ? 0.4 : 0.6));
ddxpart_dVd = ddxpart_dVg = ddxpart_dVb = ddxpart_dVs = 0.0;
dsxpart_dVd = dsxpart_dVg = dsxpart_dVb = dsxpart_dVs = 0.0;
if (here->BSIM4v7trnqsMod)
{ CoxWL = model->BSIM4v7coxe * pParam->BSIM4v7weffCV * here->BSIM4v7nf
* pParam->BSIM4v7leffCV;
T1 = here->BSIM4v7gcrg / CoxWL;
here->BSIM4v7gtau = T1 * ScalingFactor;
}
else
here->BSIM4v7gtau = 0.0;
goto line900;
line860:
/* Calculate equivalent charge current */
cqgate = *(ckt->CKTstate0 + here->BSIM4v7cqg);
cqbody = *(ckt->CKTstate0 + here->BSIM4v7cqb);
cqdrn = *(ckt->CKTstate0 + here->BSIM4v7cqd);
ceqqg = cqgate - gcggb * vgb + gcgdb * vbd + gcgsb * vbs;
ceqqd = cqdrn - gcdgb * vgb - gcdgmb * vgmb + (gcddb + gcdbdb)
* vbd - gcdbdb * vbd_jct + gcdsb * vbs;
ceqqb = cqbody - gcbgb * vgb - gcbgmb * vgmb
+ gcbdb * vbd + gcbsb * vbs;
if (here->BSIM4v7rgateMod == 3)
ceqqgmid = *(ckt->CKTstate0 + here->BSIM4v7cqgmid)
+ gcgmdb * vbd + gcgmsb * vbs - gcgmgmb * vgmb;
else
ceqqgmid = 0.0;
if (here->BSIM4v7rbodyMod)
{ ceqqjs = *(ckt->CKTstate0 + here->BSIM4v7cqbs) + gcsbsb * vbs_jct;
ceqqjd = *(ckt->CKTstate0 + here->BSIM4v7cqbd) + gcdbdb * vbd_jct;
}
if (here->BSIM4v7trnqsMod)
{ T0 = ggtg * vgb - ggtd * vbd - ggts * vbs;
ceqqg += T0;
T1 = qdef * here->BSIM4v7gtau;
ceqqd -= dxpart * T0 + T1 * (ddxpart_dVg * vgb - ddxpart_dVd
* vbd - ddxpart_dVs * vbs);
cqdef = *(ckt->CKTstate0 + here->BSIM4v7cqcdump) - gqdef * qdef;
cqcheq = *(ckt->CKTstate0 + here->BSIM4v7cqcheq)
- (gcqgb * vgb - gcqdb * vbd - gcqsb * vbs) + T0;
}
if (ckt->CKTmode & MODEINITTRAN)
{ *(ckt->CKTstate1 + here->BSIM4v7cqb) =
*(ckt->CKTstate0 + here->BSIM4v7cqb);
*(ckt->CKTstate1 + here->BSIM4v7cqg) =
*(ckt->CKTstate0 + here->BSIM4v7cqg);
*(ckt->CKTstate1 + here->BSIM4v7cqd) =
*(ckt->CKTstate0 + here->BSIM4v7cqd);
if (here->BSIM4v7rgateMod == 3)
*(ckt->CKTstate1 + here->BSIM4v7cqgmid) =
*(ckt->CKTstate0 + here->BSIM4v7cqgmid);
if (here->BSIM4v7rbodyMod)
{ *(ckt->CKTstate1 + here->BSIM4v7cqbs) =
*(ckt->CKTstate0 + here->BSIM4v7cqbs);
*(ckt->CKTstate1 + here->BSIM4v7cqbd) =
*(ckt->CKTstate0 + here->BSIM4v7cqbd);
}
}
/*
* Load current vector
*/
line900:
if (here->BSIM4v7mode >= 0)
{ Gm = here->BSIM4v7gm;
Gmbs = here->BSIM4v7gmbs;
FwdSum = Gm + Gmbs;
RevSum = 0.0;
ceqdrn = model->BSIM4v7type * (cdrain - here->BSIM4v7gds * vds
- Gm * vgs - Gmbs * vbs);
ceqbd = model->BSIM4v7type * (here->BSIM4v7csub + here->BSIM4v7Igidl
- (here->BSIM4v7gbds + here->BSIM4v7ggidld) * vds
- (here->BSIM4v7gbgs + here->BSIM4v7ggidlg) * vgs
- (here->BSIM4v7gbbs + here->BSIM4v7ggidlb) * vbs);
ceqbs = model->BSIM4v7type * (here->BSIM4v7Igisl + here->BSIM4v7ggisls * vds
- here->BSIM4v7ggislg * vgd - here->BSIM4v7ggislb * vbd);
gbbdp = -(here->BSIM4v7gbds);
gbbsp = here->BSIM4v7gbds + here->BSIM4v7gbgs + here->BSIM4v7gbbs;
gbdpg = here->BSIM4v7gbgs;
gbdpdp = here->BSIM4v7gbds;
gbdpb = here->BSIM4v7gbbs;
gbdpsp = -(gbdpg + gbdpdp + gbdpb);
gbspg = 0.0;
gbspdp = 0.0;
gbspb = 0.0;
gbspsp = 0.0;
if (model->BSIM4v7igcMod)
{ gIstotg = here->BSIM4v7gIgsg + here->BSIM4v7gIgcsg;
gIstotd = here->BSIM4v7gIgcsd;
gIstots = here->BSIM4v7gIgss + here->BSIM4v7gIgcss;
gIstotb = here->BSIM4v7gIgcsb;
Istoteq = model->BSIM4v7type * (here->BSIM4v7Igs + here->BSIM4v7Igcs
- gIstotg * vgs - here->BSIM4v7gIgcsd * vds
- here->BSIM4v7gIgcsb * vbs);
gIdtotg = here->BSIM4v7gIgdg + here->BSIM4v7gIgcdg;
gIdtotd = here->BSIM4v7gIgdd + here->BSIM4v7gIgcdd;
gIdtots = here->BSIM4v7gIgcds;
gIdtotb = here->BSIM4v7gIgcdb;
Idtoteq = model->BSIM4v7type * (here->BSIM4v7Igd + here->BSIM4v7Igcd
- here->BSIM4v7gIgdg * vgd - here->BSIM4v7gIgcdg * vgs
- here->BSIM4v7gIgcdd * vds - here->BSIM4v7gIgcdb * vbs);
}
else
{ gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0;
gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0;
}
if (model->BSIM4v7igbMod)
{ gIbtotg = here->BSIM4v7gIgbg;
gIbtotd = here->BSIM4v7gIgbd;
gIbtots = here->BSIM4v7gIgbs;
gIbtotb = here->BSIM4v7gIgbb;
Ibtoteq = model->BSIM4v7type * (here->BSIM4v7Igb
- here->BSIM4v7gIgbg * vgs - here->BSIM4v7gIgbd * vds
- here->BSIM4v7gIgbb * vbs);
}
else
gIbtotg = gIbtotd = gIbtots = gIbtotb = Ibtoteq = 0.0;
if ((model->BSIM4v7igcMod != 0) || (model->BSIM4v7igbMod != 0))
{ gIgtotg = gIstotg + gIdtotg + gIbtotg;
gIgtotd = gIstotd + gIdtotd + gIbtotd ;
gIgtots = gIstots + gIdtots + gIbtots;
gIgtotb = gIstotb + gIdtotb + gIbtotb;
Igtoteq = Istoteq + Idtoteq + Ibtoteq;
}
else
gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0;
if (here->BSIM4v7rgateMod == 2)
T0 = vges - vgs;
else if (here->BSIM4v7rgateMod == 3)
T0 = vgms - vgs;
if (here->BSIM4v7rgateMod > 1)
{ gcrgd = here->BSIM4v7gcrgd * T0;
gcrgg = here->BSIM4v7gcrgg * T0;
gcrgs = here->BSIM4v7gcrgs * T0;
gcrgb = here->BSIM4v7gcrgb * T0;
ceqgcrg = -(gcrgd * vds + gcrgg * vgs
+ gcrgb * vbs);
gcrgg -= here->BSIM4v7gcrg;
gcrg = here->BSIM4v7gcrg;
}
else
ceqgcrg = gcrg = gcrgd = gcrgg = gcrgs = gcrgb = 0.0;
}
else
{ Gm = -here->BSIM4v7gm;
Gmbs = -here->BSIM4v7gmbs;
FwdSum = 0.0;
RevSum = -(Gm + Gmbs);
ceqdrn = -model->BSIM4v7type * (cdrain + here->BSIM4v7gds * vds
+ Gm * vgd + Gmbs * vbd);
ceqbs = model->BSIM4v7type * (here->BSIM4v7csub + here->BSIM4v7Igisl
+ (here->BSIM4v7gbds + here->BSIM4v7ggisls) * vds
- (here->BSIM4v7gbgs + here->BSIM4v7ggislg) * vgd
- (here->BSIM4v7gbbs + here->BSIM4v7ggislb) * vbd);
ceqbd = model->BSIM4v7type * (here->BSIM4v7Igidl - here->BSIM4v7ggidld * vds
- here->BSIM4v7ggidlg * vgs - here->BSIM4v7ggidlb * vbs);
gbbsp = -(here->BSIM4v7gbds);
gbbdp = here->BSIM4v7gbds + here->BSIM4v7gbgs + here->BSIM4v7gbbs;
gbdpg = 0.0;
gbdpsp = 0.0;
gbdpb = 0.0;
gbdpdp = 0.0;
gbspg = here->BSIM4v7gbgs;
gbspsp = here->BSIM4v7gbds;
gbspb = here->BSIM4v7gbbs;
gbspdp = -(gbspg + gbspsp + gbspb);
if (model->BSIM4v7igcMod)
{ gIstotg = here->BSIM4v7gIgsg + here->BSIM4v7gIgcdg;
gIstotd = here->BSIM4v7gIgcds;
gIstots = here->BSIM4v7gIgss + here->BSIM4v7gIgcdd;
gIstotb = here->BSIM4v7gIgcdb;
Istoteq = model->BSIM4v7type * (here->BSIM4v7Igs + here->BSIM4v7Igcd
- here->BSIM4v7gIgsg * vgs - here->BSIM4v7gIgcdg * vgd
+ here->BSIM4v7gIgcdd * vds - here->BSIM4v7gIgcdb * vbd);
gIdtotg = here->BSIM4v7gIgdg + here->BSIM4v7gIgcsg;
gIdtotd = here->BSIM4v7gIgdd + here->BSIM4v7gIgcss;
gIdtots = here->BSIM4v7gIgcsd;
gIdtotb = here->BSIM4v7gIgcsb;
Idtoteq = model->BSIM4v7type * (here->BSIM4v7Igd + here->BSIM4v7Igcs
- (here->BSIM4v7gIgdg + here->BSIM4v7gIgcsg) * vgd
+ here->BSIM4v7gIgcsd * vds - here->BSIM4v7gIgcsb * vbd);
}
else
{ gIstotg = gIstotd = gIstots = gIstotb = Istoteq = 0.0;
gIdtotg = gIdtotd = gIdtots = gIdtotb = Idtoteq = 0.0;
}
if (model->BSIM4v7igbMod)
{ gIbtotg = here->BSIM4v7gIgbg;
gIbtotd = here->BSIM4v7gIgbs;
gIbtots = here->BSIM4v7gIgbd;
gIbtotb = here->BSIM4v7gIgbb;
Ibtoteq = model->BSIM4v7type * (here->BSIM4v7Igb
- here->BSIM4v7gIgbg * vgd + here->BSIM4v7gIgbd * vds
- here->BSIM4v7gIgbb * vbd);
}
else
gIbtotg = gIbtotd = gIbtots = gIbtotb = Ibtoteq = 0.0;
if ((model->BSIM4v7igcMod != 0) || (model->BSIM4v7igbMod != 0))
{ gIgtotg = gIstotg + gIdtotg + gIbtotg;
gIgtotd = gIstotd + gIdtotd + gIbtotd ;
gIgtots = gIstots + gIdtots + gIbtots;
gIgtotb = gIstotb + gIdtotb + gIbtotb;
Igtoteq = Istoteq + Idtoteq + Ibtoteq;
}
else
gIgtotg = gIgtotd = gIgtots = gIgtotb = Igtoteq = 0.0;
if (here->BSIM4v7rgateMod == 2)
T0 = vges - vgs;
else if (here->BSIM4v7rgateMod == 3)
T0 = vgms - vgs;
if (here->BSIM4v7rgateMod > 1)
{ gcrgd = here->BSIM4v7gcrgs * T0;
gcrgg = here->BSIM4v7gcrgg * T0;
gcrgs = here->BSIM4v7gcrgd * T0;
gcrgb = here->BSIM4v7gcrgb * T0;
ceqgcrg = -(gcrgg * vgd - gcrgs * vds
+ gcrgb * vbd);
gcrgg -= here->BSIM4v7gcrg;
gcrg = here->BSIM4v7gcrg;
}
else
ceqgcrg = gcrg = gcrgd = gcrgg = gcrgs = gcrgb = 0.0;
}
if (model->BSIM4v7rdsMod == 1)
{ ceqgstot = model->BSIM4v7type * (here->BSIM4v7gstotd * vds
+ here->BSIM4v7gstotg * vgs + here->BSIM4v7gstotb * vbs);
/* WDLiu: ceqgstot flowing away from sNodePrime */
gstot = here->BSIM4v7gstot;
gstotd = here->BSIM4v7gstotd;
gstotg = here->BSIM4v7gstotg;
gstots = here->BSIM4v7gstots - gstot;
gstotb = here->BSIM4v7gstotb;
ceqgdtot = -model->BSIM4v7type * (here->BSIM4v7gdtotd * vds
+ here->BSIM4v7gdtotg * vgs + here->BSIM4v7gdtotb * vbs);
/* WDLiu: ceqgdtot defined as flowing into dNodePrime */
gdtot = here->BSIM4v7gdtot;
gdtotd = here->BSIM4v7gdtotd - gdtot;
gdtotg = here->BSIM4v7gdtotg;
gdtots = here->BSIM4v7gdtots;
gdtotb = here->BSIM4v7gdtotb;
}
else
{ gstot = gstotd = gstotg = gstots = gstotb = ceqgstot = 0.0;
gdtot = gdtotd = gdtotg = gdtots = gdtotb = ceqgdtot = 0.0;
}
if (model->BSIM4v7type > 0)
{ ceqjs = (here->BSIM4v7cbs - here->BSIM4v7gbs * vbs_jct);
ceqjd = (here->BSIM4v7cbd - here->BSIM4v7gbd * vbd_jct);
}
else
{ ceqjs = -(here->BSIM4v7cbs - here->BSIM4v7gbs * vbs_jct);
ceqjd = -(here->BSIM4v7cbd - here->BSIM4v7gbd * vbd_jct);
ceqqg = -ceqqg;
ceqqd = -ceqqd;
ceqqb = -ceqqb;
ceqgcrg = -ceqgcrg;
if (here->BSIM4v7trnqsMod)
{ cqdef = -cqdef;
cqcheq = -cqcheq;
}
if (here->BSIM4v7rbodyMod)
{ ceqqjs = -ceqqjs;
ceqqjd = -ceqqjd;
}
if (here->BSIM4v7rgateMod == 3)
ceqqgmid = -ceqqgmid;
}
/*
* Loading RHS
*/
m = here->BSIM4v7m;
#ifdef USE_OMP
here->BSIM4v7rhsdPrime = m * (ceqjd - ceqbd + ceqgdtot
- ceqdrn - ceqqd + Idtoteq);
here->BSIM4v7rhsgPrime = m * (ceqqg - ceqgcrg + Igtoteq);
if (here->BSIM4v7rgateMod == 2)
here->BSIM4v7rhsgExt = m * ceqgcrg;
else if (here->BSIM4v7rgateMod == 3)
here->BSIM4v7grhsMid = m * (ceqqgmid + ceqgcrg);
if (!here->BSIM4v7rbodyMod)
{ here->BSIM4v7rhsbPrime = m * (ceqbd + ceqbs - ceqjd
- ceqjs - ceqqb + Ibtoteq);
here->BSIM4v7rhssPrime = m * (ceqdrn - ceqbs + ceqjs
+ ceqqg + ceqqb + ceqqd + ceqqgmid - ceqgstot + Istoteq);
}
else
{ here->BSIM4v7rhsdb = m * (ceqjd + ceqqjd);
here->BSIM4v7rhsbPrime = m * (ceqbd + ceqbs - ceqqb + Ibtoteq);
here->BSIM4v7rhssb = m * (ceqjs + ceqqjs);
here->BSIM4v7rhssPrime = m * (ceqdrn - ceqbs + ceqjs + ceqqd
+ ceqqg + ceqqb + ceqqjd + ceqqjs + ceqqgmid - ceqgstot + Istoteq);
}
if (model->BSIM4v7rdsMod)
{ here->BSIM4v7rhsd = m * ceqgdtot;
here->BSIM4v7rhss = m * ceqgstot;
}
if (here->BSIM4v7trnqsMod)
here->BSIM4v7rhsq = m * (cqcheq - cqdef);
#else
(*(ckt->CKTrhs + here->BSIM4v7dNodePrime) += m * (ceqjd - ceqbd + ceqgdtot
- ceqdrn - ceqqd + Idtoteq));
(*(ckt->CKTrhs + here->BSIM4v7gNodePrime) -= m * (ceqqg - ceqgcrg + Igtoteq));
if (here->BSIM4v7rgateMod == 2)
(*(ckt->CKTrhs + here->BSIM4v7gNodeExt) -= m * ceqgcrg);
else if (here->BSIM4v7rgateMod == 3)
(*(ckt->CKTrhs + here->BSIM4v7gNodeMid) -= m * (ceqqgmid + ceqgcrg));
if (!here->BSIM4v7rbodyMod)
{ (*(ckt->CKTrhs + here->BSIM4v7bNodePrime) += m * (ceqbd + ceqbs - ceqjd
- ceqjs - ceqqb + Ibtoteq));
(*(ckt->CKTrhs + here->BSIM4v7sNodePrime) += m * (ceqdrn - ceqbs + ceqjs
+ ceqqg + ceqqb + ceqqd + ceqqgmid - ceqgstot + Istoteq));
}
else
{ (*(ckt->CKTrhs + here->BSIM4v7dbNode) -= m * (ceqjd + ceqqjd));
(*(ckt->CKTrhs + here->BSIM4v7bNodePrime) += m * (ceqbd + ceqbs - ceqqb + Ibtoteq));
(*(ckt->CKTrhs + here->BSIM4v7sbNode) -= m * (ceqjs + ceqqjs));
(*(ckt->CKTrhs + here->BSIM4v7sNodePrime) += m * (ceqdrn - ceqbs + ceqjs + ceqqd
+ ceqqg + ceqqb + ceqqjd + ceqqjs + ceqqgmid - ceqgstot + Istoteq));
}
if (model->BSIM4v7rdsMod)
{ (*(ckt->CKTrhs + here->BSIM4v7dNode) -= m * ceqgdtot);
(*(ckt->CKTrhs + here->BSIM4v7sNode) += m * ceqgstot);
}
if (here->BSIM4v7trnqsMod)
*(ckt->CKTrhs + here->BSIM4v7qNode) += m * (cqcheq - cqdef);
#endif
/*
* Loading matrix
*/
if (!here->BSIM4v7rbodyMod)
{ gjbd = here->BSIM4v7gbd;
gjbs = here->BSIM4v7gbs;
}
else
gjbd = gjbs = 0.0;
if (!model->BSIM4v7rdsMod)
{ gdpr = here->BSIM4v7drainConductance;
gspr = here->BSIM4v7sourceConductance;
}
else
gdpr = gspr = 0.0;
geltd = here->BSIM4v7grgeltd;
T1 = qdef * here->BSIM4v7gtau;
#ifdef USE_OMP
if (here->BSIM4v7rgateMod == 1)
{ here->BSIM4v7_1 = m * geltd;
here->BSIM4v7_2 = m * geltd;
here->BSIM4v7_3 = m * geltd;
here->BSIM4v7_4 = m * (gcggb + geltd - ggtg + gIgtotg);
here->BSIM4v7_5 = m * (gcgdb - ggtd + gIgtotd);
here->BSIM4v7_6 = m * (gcgsb - ggts + gIgtots);
here->BSIM4v7_7 = m * (gcgbb - ggtb + gIgtotb);
} /* WDLiu: gcrg already subtracted from all gcrgg below */
else if (here->BSIM4v7rgateMod == 2)
{ here->BSIM4v7_8 = m * gcrg;
here->BSIM4v7_9 = m * gcrgg;
here->BSIM4v7_10 = m * gcrgd;
here->BSIM4v7_11 = m * gcrgs;
here->BSIM4v7_12 = m * gcrgb;
here->BSIM4v7_13 = m * gcrg;
here->BSIM4v7_14 = m * (gcggb - gcrgg - ggtg + gIgtotg);
here->BSIM4v7_15 = m * (gcgdb - gcrgd - ggtd + gIgtotd);
here->BSIM4v7_16 = m * (gcgsb - gcrgs - ggts + gIgtots);
here->BSIM4v7_17 = m * (gcgbb - gcrgb - ggtb + gIgtotb);
}
else if (here->BSIM4v7rgateMod == 3)
{ here->BSIM4v7_18 = m * geltd;
here->BSIM4v7_19 = m * geltd;
here->BSIM4v7_20 = m * geltd;
here->BSIM4v7_21 = m * (geltd + gcrg + gcgmgmb);
here->BSIM4v7_22 = m * (gcrgd + gcgmdb);
here->BSIM4v7_23 = m * gcrgg;
here->BSIM4v7_24 = m * (gcrgs + gcgmsb);
here->BSIM4v7_25 = m * (gcrgb + gcgmbb);
here->BSIM4v7_26 = m * gcdgmb;
here->BSIM4v7_27 = m * gcrg;
here->BSIM4v7_28 = m * gcsgmb;
here->BSIM4v7_29 = m * gcbgmb;
here->BSIM4v7_30 = m * (gcggb - gcrgg - ggtg + gIgtotg);
here->BSIM4v7_31 = m * (gcgdb - gcrgd - ggtd + gIgtotd);
here->BSIM4v7_32 = m * (gcgsb - gcrgs - ggts + gIgtots);
here->BSIM4v7_33 = m * (gcgbb - gcrgb - ggtb + gIgtotb);
}
else
{ here->BSIM4v7_34 = m * (gcggb - ggtg + gIgtotg);
here->BSIM4v7_35 = m * (gcgdb - ggtd + gIgtotd);
here->BSIM4v7_36 = m * (gcgsb - ggts + gIgtots);
here->BSIM4v7_37 = m * (gcgbb - ggtb + gIgtotb);
}
if (model->BSIM4v7rdsMod)
{ here->BSIM4v7_38 = m * gdtotg;
here->BSIM4v7_39 = m * gdtots;
here->BSIM4v7_40 = m * gdtotb;
here->BSIM4v7_41 = m * gstotd;
here->BSIM4v7_42 = m * gstotg;
here->BSIM4v7_43 = m * gstotb;
}
here->BSIM4v7_44 = m * (gdpr + here->BSIM4v7gds + here->BSIM4v7gbd + T1 * ddxpart_dVd
- gdtotd + RevSum + gcddb + gbdpdp + dxpart * ggtd - gIdtotd);
here->BSIM4v7_45 = m * (gdpr + gdtot);
here->BSIM4v7_46 = m * (Gm + gcdgb - gdtotg + gbdpg - gIdtotg
+ dxpart * ggtg + T1 * ddxpart_dVg);
here->BSIM4v7_47 = m * (here->BSIM4v7gds + gdtots - dxpart * ggts + gIdtots
- T1 * ddxpart_dVs + FwdSum - gcdsb - gbdpsp);
here->BSIM4v7_48 = m * (gjbd + gdtotb - Gmbs - gcdbb - gbdpb + gIdtotb
- T1 * ddxpart_dVb - dxpart * ggtb);
here->BSIM4v7_49 = m * (gdpr - gdtotd);
here->BSIM4v7_50 = m * (gdpr + gdtot);
here->BSIM4v7_51 = m * (here->BSIM4v7gds + gstotd + RevSum - gcsdb - gbspdp
- T1 * dsxpart_dVd - sxpart * ggtd + gIstotd);
here->BSIM4v7_52 = m * (gcsgb - Gm - gstotg + gbspg + sxpart * ggtg
+ T1 * dsxpart_dVg - gIstotg);
here->BSIM4v7_53 = m * (gspr + here->BSIM4v7gds + here->BSIM4v7gbs + T1 * dsxpart_dVs
- gstots + FwdSum + gcssb + gbspsp + sxpart * ggts - gIstots);
here->BSIM4v7_54 = m * (gspr + gstot);
here->BSIM4v7_55 = m * (gjbs + gstotb + Gmbs - gcsbb - gbspb - sxpart * ggtb
- T1 * dsxpart_dVb + gIstotb);
here->BSIM4v7_56 = m * (gspr - gstots);
here->BSIM4v7_57 = m * (gspr + gstot);
here->BSIM4v7_58 = m * (gcbdb - gjbd + gbbdp - gIbtotd);
here->BSIM4v7_59 = m * (gcbgb - here->BSIM4v7gbgs - gIbtotg);
here->BSIM4v7_60 = m * (gcbsb - gjbs + gbbsp - gIbtots);
here->BSIM4v7_61 = m * (gjbd + gjbs + gcbbb - here->BSIM4v7gbbs - gIbtotb);
ggidld = here->BSIM4v7ggidld;
ggidlg = here->BSIM4v7ggidlg;
ggidlb = here->BSIM4v7ggidlb;
ggislg = here->BSIM4v7ggislg;
ggisls = here->BSIM4v7ggisls;
ggislb = here->BSIM4v7ggislb;
/* stamp gidl */
here->BSIM4v7_62 = m * ggidld;
here->BSIM4v7_63 = m * ggidlg;
here->BSIM4v7_64 = m * (ggidlg + ggidld + ggidlb);
here->BSIM4v7_65 = m * ggidlb;
here->BSIM4v7_66 = m * ggidld;
here->BSIM4v7_67 = m * ggidlg;
here->BSIM4v7_68 = m * (ggidlg + ggidld + ggidlb);
here->BSIM4v7_69 = m * ggidlb;
/* stamp gisl */
here->BSIM4v7_70 = m * (ggisls + ggislg + ggislb);
here->BSIM4v7_71 = m * ggislg;
here->BSIM4v7_72 = m * ggisls;
here->BSIM4v7_73 = m * ggislb;
here->BSIM4v7_74 = m * (ggislg + ggisls + ggislb);
here->BSIM4v7_75 = m * ggislg;
here->BSIM4v7_76 = m * ggisls;
here->BSIM4v7_77 = m * ggislb;
if (here->BSIM4v7rbodyMod)
{ here->BSIM4v7_78 = m * (gcdbdb - here->BSIM4v7gbd);
here->BSIM4v7_79 = m * (here->BSIM4v7gbs - gcsbsb);
here->BSIM4v7_80 = m * (gcdbdb - here->BSIM4v7gbd);
here->BSIM4v7_81 = m * (here->BSIM4v7gbd - gcdbdb
+ here->BSIM4v7grbpd + here->BSIM4v7grbdb);
here->BSIM4v7_82 = m * here->BSIM4v7grbpd;
here->BSIM4v7_83 = m * here->BSIM4v7grbdb;
here->BSIM4v7_84 = m * here->BSIM4v7grbpd;
here->BSIM4v7_85 = m * here->BSIM4v7grbpb;
here->BSIM4v7_86 = m * here->BSIM4v7grbps;
here->BSIM4v7_87 = m * (here->BSIM4v7grbpd + here->BSIM4v7grbps
+ here->BSIM4v7grbpb);
/* WDLiu: (gcbbb - here->BSIM4v7gbbs) already added to BPbpPtr */
here->BSIM4v7_88 = m * (gcsbsb - here->BSIM4v7gbs);
here->BSIM4v7_89 = m * here->BSIM4v7grbps;
here->BSIM4v7_90 = m * here->BSIM4v7grbsb;
here->BSIM4v7_91 = m * (here->BSIM4v7gbs - gcsbsb
+ here->BSIM4v7grbps + here->BSIM4v7grbsb);
here->BSIM4v7_92 = m * here->BSIM4v7grbdb;
here->BSIM4v7_93 = m * here->BSIM4v7grbpb;
here->BSIM4v7_94 = m * here->BSIM4v7grbsb;
here->BSIM4v7_95 = m * (here->BSIM4v7grbsb + here->BSIM4v7grbdb
+ here->BSIM4v7grbpb);
}
if (here->BSIM4v7trnqsMod)
{ here->BSIM4v7_96 = m * (gqdef + here->BSIM4v7gtau);
here->BSIM4v7_97 = m * (ggtg - gcqgb);
here->BSIM4v7_98 = m * (ggtd - gcqdb);
here->BSIM4v7_99 = m * (ggts - gcqsb);
here->BSIM4v7_100 = m * (ggtb - gcqbb);
here->BSIM4v7_101 = m * dxpart * here->BSIM4v7gtau;
here->BSIM4v7_102 = m * sxpart * here->BSIM4v7gtau;
here->BSIM4v7_103 = m * here->BSIM4v7gtau;
}
#else
if (here->BSIM4v7rgateMod == 1)
{ (*(here->BSIM4v7GEgePtr) += m * geltd);
(*(here->BSIM4v7GPgePtr) -= m * geltd);
(*(here->BSIM4v7GEgpPtr) -= m * geltd);
(*(here->BSIM4v7GPgpPtr) += m * (gcggb + geltd - ggtg + gIgtotg));
(*(here->BSIM4v7GPdpPtr) += m * (gcgdb - ggtd + gIgtotd));
(*(here->BSIM4v7GPspPtr) += m * (gcgsb - ggts + gIgtots));
(*(here->BSIM4v7GPbpPtr) += m * (gcgbb - ggtb + gIgtotb));
} /* WDLiu: gcrg already subtracted from all gcrgg below */
else if (here->BSIM4v7rgateMod == 2)
{ (*(here->BSIM4v7GEgePtr) += m * gcrg);
(*(here->BSIM4v7GEgpPtr) += m * gcrgg);
(*(here->BSIM4v7GEdpPtr) += m * gcrgd);
(*(here->BSIM4v7GEspPtr) += m * gcrgs);
(*(here->BSIM4v7GEbpPtr) += m * gcrgb);
(*(here->BSIM4v7GPgePtr) -= m * gcrg);
(*(here->BSIM4v7GPgpPtr) += m * (gcggb - gcrgg - ggtg + gIgtotg));
(*(here->BSIM4v7GPdpPtr) += m * (gcgdb - gcrgd - ggtd + gIgtotd));
(*(here->BSIM4v7GPspPtr) += m * (gcgsb - gcrgs - ggts + gIgtots));
(*(here->BSIM4v7GPbpPtr) += m * (gcgbb - gcrgb - ggtb + gIgtotb));
}
else if (here->BSIM4v7rgateMod == 3)
{ (*(here->BSIM4v7GEgePtr) += m * geltd);
(*(here->BSIM4v7GEgmPtr) -= m * geltd);
(*(here->BSIM4v7GMgePtr) -= m * geltd);
(*(here->BSIM4v7GMgmPtr) += m * (geltd + gcrg + gcgmgmb));
(*(here->BSIM4v7GMdpPtr) += m * (gcrgd + gcgmdb));
(*(here->BSIM4v7GMgpPtr) += m * gcrgg);
(*(here->BSIM4v7GMspPtr) += m * (gcrgs + gcgmsb));
(*(here->BSIM4v7GMbpPtr) += m * (gcrgb + gcgmbb));
(*(here->BSIM4v7DPgmPtr) += m * gcdgmb);
(*(here->BSIM4v7GPgmPtr) -= m * gcrg);
(*(here->BSIM4v7SPgmPtr) += m * gcsgmb);
(*(here->BSIM4v7BPgmPtr) += m * gcbgmb);
(*(here->BSIM4v7GPgpPtr) += m * (gcggb - gcrgg - ggtg + gIgtotg));
(*(here->BSIM4v7GPdpPtr) += m * (gcgdb - gcrgd - ggtd + gIgtotd));
(*(here->BSIM4v7GPspPtr) += m * (gcgsb - gcrgs - ggts + gIgtots));
(*(here->BSIM4v7GPbpPtr) += m * (gcgbb - gcrgb - ggtb + gIgtotb));
}
else
{ (*(here->BSIM4v7GPgpPtr) += m * (gcggb - ggtg + gIgtotg));
(*(here->BSIM4v7GPdpPtr) += m * (gcgdb - ggtd + gIgtotd));
(*(here->BSIM4v7GPspPtr) += m * (gcgsb - ggts + gIgtots));
(*(here->BSIM4v7GPbpPtr) += m * (gcgbb - ggtb + gIgtotb));
}
if (model->BSIM4v7rdsMod)
{ (*(here->BSIM4v7DgpPtr) += m * gdtotg);
(*(here->BSIM4v7DspPtr) += m * gdtots);
(*(here->BSIM4v7DbpPtr) += m * gdtotb);
(*(here->BSIM4v7SdpPtr) += m * gstotd);
(*(here->BSIM4v7SgpPtr) += m * gstotg);
(*(here->BSIM4v7SbpPtr) += m * gstotb);
}
(*(here->BSIM4v7DPdpPtr) += m * (gdpr + here->BSIM4v7gds + here->BSIM4v7gbd + T1 * ddxpart_dVd
- gdtotd + RevSum + gcddb + gbdpdp + dxpart * ggtd - gIdtotd));
(*(here->BSIM4v7DPdPtr) -= m * (gdpr + gdtot));
(*(here->BSIM4v7DPgpPtr) += m * (Gm + gcdgb - gdtotg + gbdpg - gIdtotg
+ dxpart * ggtg + T1 * ddxpart_dVg));
(*(here->BSIM4v7DPspPtr) -= m * (here->BSIM4v7gds + gdtots - dxpart * ggts + gIdtots
- T1 * ddxpart_dVs + FwdSum - gcdsb - gbdpsp));
(*(here->BSIM4v7DPbpPtr) -= m * (gjbd + gdtotb - Gmbs - gcdbb - gbdpb + gIdtotb
- T1 * ddxpart_dVb - dxpart * ggtb));
(*(here->BSIM4v7DdpPtr) -= m * (gdpr - gdtotd));
(*(here->BSIM4v7DdPtr) += m * (gdpr + gdtot));
(*(here->BSIM4v7SPdpPtr) -= m * (here->BSIM4v7gds + gstotd + RevSum - gcsdb - gbspdp
- T1 * dsxpart_dVd - sxpart * ggtd + gIstotd));
(*(here->BSIM4v7SPgpPtr) += m * (gcsgb - Gm - gstotg + gbspg + sxpart * ggtg
+ T1 * dsxpart_dVg - gIstotg));
(*(here->BSIM4v7SPspPtr) += m * (gspr + here->BSIM4v7gds + here->BSIM4v7gbs + T1 * dsxpart_dVs
- gstots + FwdSum + gcssb + gbspsp + sxpart * ggts - gIstots));
(*(here->BSIM4v7SPsPtr) -= m * (gspr + gstot));
(*(here->BSIM4v7SPbpPtr) -= m * (gjbs + gstotb + Gmbs - gcsbb - gbspb - sxpart * ggtb
- T1 * dsxpart_dVb + gIstotb));
(*(here->BSIM4v7SspPtr) -= m * (gspr - gstots));
(*(here->BSIM4v7SsPtr) += m * (gspr + gstot));
(*(here->BSIM4v7BPdpPtr) += m * (gcbdb - gjbd + gbbdp - gIbtotd));
(*(here->BSIM4v7BPgpPtr) += m * (gcbgb - here->BSIM4v7gbgs - gIbtotg));
(*(here->BSIM4v7BPspPtr) += m * (gcbsb - gjbs + gbbsp - gIbtots));
(*(here->BSIM4v7BPbpPtr) += m * (gjbd + gjbs + gcbbb - here->BSIM4v7gbbs
- gIbtotb));
ggidld = here->BSIM4v7ggidld;
ggidlg = here->BSIM4v7ggidlg;
ggidlb = here->BSIM4v7ggidlb;
ggislg = here->BSIM4v7ggislg;
ggisls = here->BSIM4v7ggisls;
ggislb = here->BSIM4v7ggislb;
/* stamp gidl */
(*(here->BSIM4v7DPdpPtr) += m * ggidld);
(*(here->BSIM4v7DPgpPtr) += m * ggidlg);
(*(here->BSIM4v7DPspPtr) -= m * (ggidlg + ggidld + ggidlb));
(*(here->BSIM4v7DPbpPtr) += m * ggidlb);
(*(here->BSIM4v7BPdpPtr) -= m * ggidld);
(*(here->BSIM4v7BPgpPtr) -= m * ggidlg);
(*(here->BSIM4v7BPspPtr) += m * (ggidlg + ggidld + ggidlb));
(*(here->BSIM4v7BPbpPtr) -= m * ggidlb);
/* stamp gisl */
(*(here->BSIM4v7SPdpPtr) -= m * (ggisls + ggislg + ggislb));
(*(here->BSIM4v7SPgpPtr) += m * ggislg);
(*(here->BSIM4v7SPspPtr) += m * ggisls);
(*(here->BSIM4v7SPbpPtr) += m * ggislb);
(*(here->BSIM4v7BPdpPtr) += m * (ggislg + ggisls + ggislb));
(*(here->BSIM4v7BPgpPtr) -= m * ggislg);
(*(here->BSIM4v7BPspPtr) -= m * ggisls);
(*(here->BSIM4v7BPbpPtr) -= m * ggislb);
if (here->BSIM4v7rbodyMod)
{ (*(here->BSIM4v7DPdbPtr) += m * (gcdbdb - here->BSIM4v7gbd));
(*(here->BSIM4v7SPsbPtr) -= m * (here->BSIM4v7gbs - gcsbsb));
(*(here->BSIM4v7DBdpPtr) += m * (gcdbdb - here->BSIM4v7gbd));
(*(here->BSIM4v7DBdbPtr) += m * (here->BSIM4v7gbd - gcdbdb
+ here->BSIM4v7grbpd + here->BSIM4v7grbdb));
(*(here->BSIM4v7DBbpPtr) -= m * here->BSIM4v7grbpd);
(*(here->BSIM4v7DBbPtr) -= m * here->BSIM4v7grbdb);
(*(here->BSIM4v7BPdbPtr) -= m * here->BSIM4v7grbpd);
(*(here->BSIM4v7BPbPtr) -= m * here->BSIM4v7grbpb);
(*(here->BSIM4v7BPsbPtr) -= m * here->BSIM4v7grbps);
(*(here->BSIM4v7BPbpPtr) += m * (here->BSIM4v7grbpd + here->BSIM4v7grbps
+ here->BSIM4v7grbpb));
/* WDLiu: (gcbbb - here->BSIM4v7gbbs) already added to BPbpPtr */
(*(here->BSIM4v7SBspPtr) += m * (gcsbsb - here->BSIM4v7gbs));
(*(here->BSIM4v7SBbpPtr) -= m * here->BSIM4v7grbps);
(*(here->BSIM4v7SBbPtr) -= m * here->BSIM4v7grbsb);
(*(here->BSIM4v7SBsbPtr) += m * (here->BSIM4v7gbs - gcsbsb
+ here->BSIM4v7grbps + here->BSIM4v7grbsb));
(*(here->BSIM4v7BdbPtr) -= m * here->BSIM4v7grbdb);
(*(here->BSIM4v7BbpPtr) -= m * here->BSIM4v7grbpb);
(*(here->BSIM4v7BsbPtr) -= m * here->BSIM4v7grbsb);
(*(here->BSIM4v7BbPtr) += m * (here->BSIM4v7grbsb + here->BSIM4v7grbdb
+ here->BSIM4v7grbpb));
}
if (here->BSIM4v7trnqsMod)
{ (*(here->BSIM4v7QqPtr) += m * (gqdef + here->BSIM4v7gtau));
(*(here->BSIM4v7QgpPtr) += m * (ggtg - gcqgb));
(*(here->BSIM4v7QdpPtr) += m * (ggtd - gcqdb));
(*(here->BSIM4v7QspPtr) += m * (ggts - gcqsb));
(*(here->BSIM4v7QbpPtr) += m * (ggtb - gcqbb));
(*(here->BSIM4v7DPqPtr) += m * dxpart * here->BSIM4v7gtau);
(*(here->BSIM4v7SPqPtr) += m * sxpart * here->BSIM4v7gtau);
(*(here->BSIM4v7GPqPtr) -= m * here->BSIM4v7gtau);
}
#endif
line1000: ;
#ifndef USE_OMP
} /* End of MOSFET Instance */
} /* End of Model Instance */
#endif
return(OK);
}
/* function to compute poly depletion effect */
/*
 * Computes the effective gate voltage after accounting for depletion of
 * the polysilicon gate, per the BSIM4 model equations.
 *
 *   phi           surface potential (V)
 *   ngate         poly gate doping concentration (cm^-3)
 *   epsgate       gate permittivity
 *   coxe          electrical gate oxide capacitance per area
 *   Vgs           applied gate-source voltage (V)
 *   Vgs_eff       out: effective Vgs after poly depletion drop
 *   dVgs_eff_dVg  out: derivative d(Vgs_eff)/d(Vgs)
 *
 * Returns 0 always.
 */
int BSIM4v7polyDepletion(
    double phi,
    double ngate,
    double epsgate,
    double coxe,
    double Vgs,
    double *Vgs_eff,
    double *dVgs_eff_dVg)
{
    /* Poly depletion applies only for moderately doped gates in the
       1e18..1e25 window, with the device biased past phi and a nonzero
       gate permittivity; otherwise the gate behaves as ideal. */
    if ((ngate <= 1.0e18) || (ngate >= 1.0e25) ||
        (Vgs <= phi) || (epsgate == 0))
    {
        *Vgs_eff = Vgs;
        *dVgs_eff_dVg = 1.0;
        return(0);
    }

    /* Characteristic voltage of the poly depletion charge. */
    double vchar = 1.0e6 * CHARGE * epsgate * ngate / (coxe * coxe);
    double vov   = Vgs - phi;
    double root  = sqrt(1.0 + 2.0 * vov / vchar);
    double tnum  = 2.0 * vov / (root + 1.0);
    double vpoly = 0.5 * tnum * tnum / vchar;           /* voltage dropped across the poly */
    /* Smoothing (constants 1.12 = Si bandgap, 0.05 / 0.224 = smoothing
       parameters) so the drop transitions continuously near its limit. */
    double targ  = 1.12 - vpoly - 0.05;
    double sq    = sqrt(targ * targ + 0.224);
    double vdrop = 1.12 - 0.5 * (targ + sq);

    *Vgs_eff = Vgs - vdrop;
    *dVgs_eff_dVg = 1.0 - (0.5 - 0.5 / root) * (1.0 + targ / sq);
    return(0);
}
#ifdef USE_OMP
/*
 * Serial reduction phase for the OpenMP build: during the parallel
 * instance-evaluation pass each instance caches its RHS contributions
 * (here->BSIM4v7rhs*) and matrix stamp values (here->BSIM4v7_1 .. _103).
 * This routine folds those cached values into the shared circuit RHS
 * vector (ckt->CKTrhs) and sparse matrix sequentially, avoiding data
 * races on shared matrix entries.  The +=/-= signs mirror exactly the
 * non-OMP load path in the main BSIM4v7load routine.
 */
void BSIM4v7LoadRhsMat(GENmodel *inModel, CKTcircuit *ckt)
{
    int InstCount, idx;
    BSIM4v7instance **InstArray;
    BSIM4v7instance *here;
    BSIM4v7model *model = (BSIM4v7model*)inModel;

    InstArray = model->BSIM4v7InstanceArray;
    InstCount = model->BSIM4v7InstCount;

    for(idx = 0; idx < InstCount; idx++) {
        here = InstArray[idx];
        /* Instances of several models may share the array; refresh the
           model pointer for the current instance. */
        model = BSIM4v7modPtr(here);

        /* Update b for Ax = b */
        (*(ckt->CKTrhs + here->BSIM4v7dNodePrime) += here->BSIM4v7rhsdPrime);
        (*(ckt->CKTrhs + here->BSIM4v7gNodePrime) -= here->BSIM4v7rhsgPrime);

        if (here->BSIM4v7rgateMod == 2)
            (*(ckt->CKTrhs + here->BSIM4v7gNodeExt) -= here->BSIM4v7rhsgExt);
        else if (here->BSIM4v7rgateMod == 3)
            (*(ckt->CKTrhs + here->BSIM4v7gNodeMid) -= here->BSIM4v7grhsMid);

        if (!here->BSIM4v7rbodyMod)
        {   (*(ckt->CKTrhs + here->BSIM4v7bNodePrime) += here->BSIM4v7rhsbPrime);
            (*(ckt->CKTrhs + here->BSIM4v7sNodePrime) += here->BSIM4v7rhssPrime);
        }
        else
        {   /* Body-resistance network adds separate db/sb node entries. */
            (*(ckt->CKTrhs + here->BSIM4v7dbNode) -= here->BSIM4v7rhsdb);
            (*(ckt->CKTrhs + here->BSIM4v7bNodePrime) += here->BSIM4v7rhsbPrime);
            (*(ckt->CKTrhs + here->BSIM4v7sbNode) -= here->BSIM4v7rhssb);
            (*(ckt->CKTrhs + here->BSIM4v7sNodePrime) += here->BSIM4v7rhssPrime);
        }

        if (model->BSIM4v7rdsMod)
        {   (*(ckt->CKTrhs + here->BSIM4v7dNode) -= here->BSIM4v7rhsd);
            (*(ckt->CKTrhs + here->BSIM4v7sNode) += here->BSIM4v7rhss);
        }

        if (here->BSIM4v7trnqsMod)
            *(ckt->CKTrhs + here->BSIM4v7qNode) += here->BSIM4v7rhsq;

        /* Update A for Ax = b */
        /* Gate-resistance stamps; which entries exist depends on rgateMod. */
        if (here->BSIM4v7rgateMod == 1)
        {   (*(here->BSIM4v7GEgePtr) += here->BSIM4v7_1);
            (*(here->BSIM4v7GPgePtr) -= here->BSIM4v7_2);
            (*(here->BSIM4v7GEgpPtr) -= here->BSIM4v7_3);
            (*(here->BSIM4v7GPgpPtr) += here->BSIM4v7_4);
            (*(here->BSIM4v7GPdpPtr) += here->BSIM4v7_5);
            (*(here->BSIM4v7GPspPtr) += here->BSIM4v7_6);
            (*(here->BSIM4v7GPbpPtr) += here->BSIM4v7_7);
        }
        else if (here->BSIM4v7rgateMod == 2)
        {   (*(here->BSIM4v7GEgePtr) += here->BSIM4v7_8);
            (*(here->BSIM4v7GEgpPtr) += here->BSIM4v7_9);
            (*(here->BSIM4v7GEdpPtr) += here->BSIM4v7_10);
            (*(here->BSIM4v7GEspPtr) += here->BSIM4v7_11);
            (*(here->BSIM4v7GEbpPtr) += here->BSIM4v7_12);
            (*(here->BSIM4v7GPgePtr) -= here->BSIM4v7_13);
            (*(here->BSIM4v7GPgpPtr) += here->BSIM4v7_14);
            (*(here->BSIM4v7GPdpPtr) += here->BSIM4v7_15);
            (*(here->BSIM4v7GPspPtr) += here->BSIM4v7_16);
            (*(here->BSIM4v7GPbpPtr) += here->BSIM4v7_17);
        }
        else if (here->BSIM4v7rgateMod == 3)
        {   /* Mode 3 adds the mid-gate node (GM) entries. */
            (*(here->BSIM4v7GEgePtr) += here->BSIM4v7_18);
            (*(here->BSIM4v7GEgmPtr) -= here->BSIM4v7_19);
            (*(here->BSIM4v7GMgePtr) -= here->BSIM4v7_20);
            (*(here->BSIM4v7GMgmPtr) += here->BSIM4v7_21);
            (*(here->BSIM4v7GMdpPtr) += here->BSIM4v7_22);
            (*(here->BSIM4v7GMgpPtr) += here->BSIM4v7_23);
            (*(here->BSIM4v7GMspPtr) += here->BSIM4v7_24);
            (*(here->BSIM4v7GMbpPtr) += here->BSIM4v7_25);
            (*(here->BSIM4v7DPgmPtr) += here->BSIM4v7_26);
            (*(here->BSIM4v7GPgmPtr) -= here->BSIM4v7_27);
            (*(here->BSIM4v7SPgmPtr) += here->BSIM4v7_28);
            (*(here->BSIM4v7BPgmPtr) += here->BSIM4v7_29);
            (*(here->BSIM4v7GPgpPtr) += here->BSIM4v7_30);
            (*(here->BSIM4v7GPdpPtr) += here->BSIM4v7_31);
            (*(here->BSIM4v7GPspPtr) += here->BSIM4v7_32);
            (*(here->BSIM4v7GPbpPtr) += here->BSIM4v7_33);
        }
        else
        {   (*(here->BSIM4v7GPgpPtr) += here->BSIM4v7_34);
            (*(here->BSIM4v7GPdpPtr) += here->BSIM4v7_35);
            (*(here->BSIM4v7GPspPtr) += here->BSIM4v7_36);
            (*(here->BSIM4v7GPbpPtr) += here->BSIM4v7_37);
        }

        /* Bias-dependent S/D series resistance stamps. */
        if (model->BSIM4v7rdsMod)
        {   (*(here->BSIM4v7DgpPtr) += here->BSIM4v7_38);
            (*(here->BSIM4v7DspPtr) += here->BSIM4v7_39);
            (*(here->BSIM4v7DbpPtr) += here->BSIM4v7_40);
            (*(here->BSIM4v7SdpPtr) += here->BSIM4v7_41);
            (*(here->BSIM4v7SgpPtr) += here->BSIM4v7_42);
            (*(here->BSIM4v7SbpPtr) += here->BSIM4v7_43);
        }

        /* Core intrinsic-device conductance stamps. */
        (*(here->BSIM4v7DPdpPtr) += here->BSIM4v7_44);
        (*(here->BSIM4v7DPdPtr) -= here->BSIM4v7_45);
        (*(here->BSIM4v7DPgpPtr) += here->BSIM4v7_46);
        (*(here->BSIM4v7DPspPtr) -= here->BSIM4v7_47);
        (*(here->BSIM4v7DPbpPtr) -= here->BSIM4v7_48);
        (*(here->BSIM4v7DdpPtr) -= here->BSIM4v7_49);
        (*(here->BSIM4v7DdPtr) += here->BSIM4v7_50);
        (*(here->BSIM4v7SPdpPtr) -= here->BSIM4v7_51);
        (*(here->BSIM4v7SPgpPtr) += here->BSIM4v7_52);
        (*(here->BSIM4v7SPspPtr) += here->BSIM4v7_53);
        (*(here->BSIM4v7SPsPtr) -= here->BSIM4v7_54);
        (*(here->BSIM4v7SPbpPtr) -= here->BSIM4v7_55);
        (*(here->BSIM4v7SspPtr) -= here->BSIM4v7_56);
        (*(here->BSIM4v7SsPtr) += here->BSIM4v7_57);
        (*(here->BSIM4v7BPdpPtr) += here->BSIM4v7_58);
        (*(here->BSIM4v7BPgpPtr) += here->BSIM4v7_59);
        (*(here->BSIM4v7BPspPtr) += here->BSIM4v7_60);
        (*(here->BSIM4v7BPbpPtr) += here->BSIM4v7_61);

        /* stamp gidl */
        (*(here->BSIM4v7DPdpPtr) += here->BSIM4v7_62);
        (*(here->BSIM4v7DPgpPtr) += here->BSIM4v7_63);
        (*(here->BSIM4v7DPspPtr) -= here->BSIM4v7_64);
        (*(here->BSIM4v7DPbpPtr) += here->BSIM4v7_65);
        (*(here->BSIM4v7BPdpPtr) -= here->BSIM4v7_66);
        (*(here->BSIM4v7BPgpPtr) -= here->BSIM4v7_67);
        (*(here->BSIM4v7BPspPtr) += here->BSIM4v7_68);
        (*(here->BSIM4v7BPbpPtr) -= here->BSIM4v7_69);

        /* stamp gisl */
        (*(here->BSIM4v7SPdpPtr) -= here->BSIM4v7_70);
        (*(here->BSIM4v7SPgpPtr) += here->BSIM4v7_71);
        (*(here->BSIM4v7SPspPtr) += here->BSIM4v7_72);
        (*(here->BSIM4v7SPbpPtr) += here->BSIM4v7_73);
        (*(here->BSIM4v7BPdpPtr) += here->BSIM4v7_74);
        (*(here->BSIM4v7BPgpPtr) -= here->BSIM4v7_75);
        (*(here->BSIM4v7BPspPtr) -= here->BSIM4v7_76);
        (*(here->BSIM4v7BPbpPtr) -= here->BSIM4v7_77);

        /* Substrate-resistance network stamps (rbodyMod). */
        if (here->BSIM4v7rbodyMod)
        {   (*(here->BSIM4v7DPdbPtr) += here->BSIM4v7_78);
            (*(here->BSIM4v7SPsbPtr) -= here->BSIM4v7_79);
            (*(here->BSIM4v7DBdpPtr) += here->BSIM4v7_80);
            (*(here->BSIM4v7DBdbPtr) += here->BSIM4v7_81);
            (*(here->BSIM4v7DBbpPtr) -= here->BSIM4v7_82);
            (*(here->BSIM4v7DBbPtr) -= here->BSIM4v7_83);
            (*(here->BSIM4v7BPdbPtr) -= here->BSIM4v7_84);
            (*(here->BSIM4v7BPbPtr) -= here->BSIM4v7_85);
            (*(here->BSIM4v7BPsbPtr) -= here->BSIM4v7_86);
            (*(here->BSIM4v7BPbpPtr) += here->BSIM4v7_87);
            (*(here->BSIM4v7SBspPtr) += here->BSIM4v7_88);
            (*(here->BSIM4v7SBbpPtr) -= here->BSIM4v7_89);
            (*(here->BSIM4v7SBbPtr) -= here->BSIM4v7_90);
            (*(here->BSIM4v7SBsbPtr) += here->BSIM4v7_91);
            (*(here->BSIM4v7BdbPtr) -= here->BSIM4v7_92);
            (*(here->BSIM4v7BbpPtr) -= here->BSIM4v7_93);
            (*(here->BSIM4v7BsbPtr) -= here->BSIM4v7_94);
            (*(here->BSIM4v7BbPtr) += here->BSIM4v7_95);
        }

        /* Charge-deficit (NQS) node stamps (trnqsMod). */
        if (here->BSIM4v7trnqsMod)
        {   (*(here->BSIM4v7QqPtr) += here->BSIM4v7_96);
            (*(here->BSIM4v7QgpPtr) += here->BSIM4v7_97);
            (*(here->BSIM4v7QdpPtr) += here->BSIM4v7_98);
            (*(here->BSIM4v7QspPtr) += here->BSIM4v7_99);
            (*(here->BSIM4v7QbpPtr) += here->BSIM4v7_100);
            (*(here->BSIM4v7DPqPtr) += here->BSIM4v7_101);
            (*(here->BSIM4v7SPqPtr) += here->BSIM4v7_102);
            (*(here->BSIM4v7GPqPtr) -= here->BSIM4v7_103);
        }
    }
}
#endif
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
 public:
  /*! \brief For sparse, assume missing rvalue is 0: out[i] = OP(lhs[i], 0) */
  template<typename OP, int Req>
  struct MissingRValueOp {
    typedef OP Operation;
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
    }
  };
  /*! \brief For sparse, assume missing lvalue is 0: out[i] = OP(0, rhs[i]) */
  template<typename OP, int Req>
  struct MissingLValueOp {
    typedef OP Operation;
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
    }
  };

 private:
  /*!
   * \brief CSR operation requires temp space
   */
  enum ResourceRequestType {
    kTempSpace
  };
  /*!
   * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
   *        CPU-Only version.
   * \return index of the first row NOT filled (min(idx_l, idx_r)), so the caller can
   *         resume iteration there.
   */
  template<typename DType, typename OP, typename xpu>
  static inline size_t FillDense(mshadow::Stream<xpu> *s,
                                 const size_t idx_l,
                                 const size_t idx_r,
                                 const OpReqType req,
                                 mshadow::Tensor<xpu, 2, DType> *out,
                                 const size_t iter_out) {
    const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
    if (static_cast<size_t>(index_out_min) > iter_out) {
      // OP(0, 0) is row-invariant, so compute it once and broadcast-fill.
      const DType zero_input_val = OP::Map(DType(0), DType(0));
      #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
        Fill<false>(s, (*out)[i], req, zero_input_val);
      }
    }
    return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
  }
  // True when both NDArrays alias the same underlying engine variable (same storage).
  static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
    return a1.var() == a2.var();
  }
  /*! \brief Minimum of three */
  static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
    return a < b ? (a < c ? a : c) : (b < c ? b : c);
  }
  /*!
   * \brief Dense backward not using forward inputs: lgrad = LOP(ograd), rgrad = ROP(ograd).
   *        When the op is identity and the request is kWriteInplace, the output already
   *        aliases ograd, so only an alias check is performed instead of a copy.
   */
  template<typename xpu, typename LOP, typename ROP, typename DType>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // Round element count up to a whole number of vector lanes.
    const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
                                      / DataType<DType>::kLanes);
    const DType *ograd_dptr = inputs[0].dptr<DType>();
    if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
      CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
    } else if (req[0] != kNullOp) {
      DType *lgrad_dptr = outputs[0].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
      });
    }
    if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
      CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
    } else if (req[1] != kNullOp) {
      DType *rgrad_dptr = outputs[1].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
      });
    }
  }
  /*!
   * \brief Dense backward using forward inputs: inputs = {ograd, lhs, rhs},
   *        outputs = {lgrad, rgrad}. backward_grad_tuned<LOP/ROP> combines the
   *        partial with ograd (presumably multiplies by ograd — see
   *        mxnet_op::backward_grad_tuned for the exact contract).
   */
  template<typename xpu, typename LOP, typename ROP, typename DType>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
    DCHECK_EQ(outputs.size(), 2U);
    DCHECK_EQ(inputs.size(), 3U);
    mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
    const DType *ograd_dptr = inputs[0].dptr<DType>();
    const DType *lhs_dptr = inputs[1].dptr<DType>();
    const DType *rhs_dptr = inputs[2].dptr<DType>();
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      const int size = static_cast<int>(
        (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
      DType * lgrad_dptr = outputs[0].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
        s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
    MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
      const int size = static_cast<int>(
        (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
      DType * rgrad_dptr = outputs[1].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
        s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
  }
  /*!
   * \brief Row-sparse backward using forward inputs (inputs = {ograd, lhs, rhs}).
   *        Computes grad = OP(lhs, rhs), then multiplies in-place by ograd.
   *        NOTE(review): the backup_compute parameter is accepted but never used
   *        in this body — confirm whether a fallback path was intended.
   */
  template<
    typename xpu,
    typename LOP,
    typename ROP,
    bool in0_ok_dense = false,
    bool in1_ok_dense = false,
    bool in2_ok_dense = false,
    typename BackupCompute>
  static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const std::vector<NDArray> &inputs,
                                      const std::vector<OpReqType> &req,
                                      const std::vector<NDArray> &outputs,
                                      BackupCompute backup_compute) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    // lhs grad
    if (req[0] != kNullOp) {
      // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
      RspRspOp<LOP>(
        s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
        false, false, false, false);
      // lhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
        false, false, true, false);
    }
    // rhs grad
    if (req[1] != kNullOp) {
      RspRspOp<ROP>(
        s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
        false, false, false, false);
      // rhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
        false, false, true, false);
    }
  }
  /*!
   * \brief Backward for dense/CSR mixed inputs; only supported for elementwise
   *        multiply, i.e. LOP == mshadow_op::right and ROP == mshadow_op::left.
   */
  template<typename xpu, typename LOP, typename ROP>
  static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
                                         const OpContext &ctx,
                                         const std::vector<NDArray> &inputs,
                                         const std::vector<OpReqType> &req,
                                         const std::vector<NDArray> &outputs) {
    const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
                               std::is_same<mshadow_op::left, ROP>::value;
    CHECK(supported_ops)
      << "Only backward for mul is supported (LOP should be right, ROP should be left)";
    const NDArray& out_grad = inputs[0];
    const NDArray& lhs_in = inputs[1];
    const NDArray& rhs_in = inputs[2];
    const NDArray& lhs_grad = outputs[0];
    const NDArray& rhs_grad = outputs[1];
    // 'reverse' means the lhs gradient is the CSR-storage side.
    const bool reverse = (outputs[0].storage_type() == kCSRStorage);
    if (reverse) {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
                                    {rhs_grad.data()});
    } else {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
                                    {lhs_grad.data()});
    }
  }

 public:
  /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);
  /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);
  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);
  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);
  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);
  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);
  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);
  /*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

 public:
  /*!
   * \brief Rsp-op-Rsp operation which produces a dense result
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                          int dev_mask,
                                          DispatchMode* dispatch_mode,
                                          std::vector<int> *in_attrs,
                                          std::vector<int> *out_attrs);
  /*!
   * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
   *        Typically used for sparse * dense = sparse.
   *        Note: for csr, it dispatches to fallback other than csr, csr -> csr
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                      int dev_mask,
                                      DispatchMode* dispatch_mode,
                                      std::vector<int> *in_attrs,
                                      std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
    CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
    const auto& lhs_stype = in_attrs->at(0);
    const auto& rhs_stype = in_attrs->at(1);
    auto& out_stype = out_attrs->at(0);
    bool dispatched = false;
    // Sparse FComputeEx kernels here are CPU-only; fall back on other devices.
    const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns -> dns
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
      // csr, dns -> csr
      // dns, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
  }
  /*!
   * \brief Allow one of the inputs to be dense and produce a dense output,
   *        for rsp inputs only support when both inputs are rsp type.
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  template<bool cpu_only, bool rsp, bool csr>
  static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                     const int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int> *in_attrs,
                                     std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2);
    CHECK_EQ(out_attrs->size(), 1);
    const auto lhs_stype = (*in_attrs)[0];
    const auto rhs_stype = (*in_attrs)[1];
    bool dispatched = false;
    const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns ... -> dns
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp, ... -> rsp
      dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr, ... -> csr
      dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
                        (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
      // dense, csr -> dense / csr, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
                        (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
      // dense, rsp -> dense / rsp, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      // NOTE(review): the result of dispatch_fallback is discarded and this
      // function unconditionally returns true, unlike PreferSparseStorageType
      // which returns the dispatch result — confirm this is intentional.
      dispatch_fallback(out_attrs, dispatch_mode);
    }
    return true;
  }
  /*!
   * \brief Backward pass computing input gradient using forward inputs
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                       int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int> *in_attrs,
                                       std::vector<int> *out_attrs);
  /*! \brief Dense forward: out = OP(lhs, rhs), elementwise over flattened blobs. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] != kNullOp) {
      Stream<xpu> *s = ctx.get_stream<xpu>();
      CHECK_EQ(inputs.size(), 2U);
      CHECK_EQ(outputs.size(), 1U);
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
          // Guard against size mismatch by taking the smallest of the three,
          // rounded up to whole vector lanes.
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
          + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        });
      });
    }
  }
  /*! \brief Same as Compute but allows the half2 vectorized type switch. */
  template<typename xpu, typename OP>
  static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] != kNullOp) {
      Stream<xpu> *s = ctx.get_stream<xpu>();
      CHECK_EQ(inputs.size(), 2U);
      CHECK_EQ(outputs.size(), 1U);
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
          + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        });
      });
    }
  }
  /*! \brief Sparse forward: routes the lhs/rhs storage-type combination to the
   *         matching specialized kernel, or logs a fallback for unsupported mixes. */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
        (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
    } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
      // csr, csr -> csr
      CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      // 'reverse' records that the sparse operand was the lhs.
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kRowSparseStorage);
      const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
      DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  /*! \brief ComputeEx allowing dense lvalue and/or rvalue */
  template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
  static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                  const OpContext &ctx,
                                  const std::vector<NDArray> &inputs,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<NDArray> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
        lhs_may_be_dense && rhs_may_be_dense) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      // More than once dense not allowed (this will be checked in RspRspOp):
      // rsp, dns -> dns  <-- NOT ALLOWED
      // dns, rsp -> dns  <-- NOT ALLOWED
      mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1],
        req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
    } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
      ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kCSRStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  /*! \brief Dense backward not using forward inputs; dispatches on output dtype. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<TBlob> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }
  /*! \brief BackwardUseNone with the half2 vectorized type switch. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
                                              const OpContext &ctx,
                                              const std::vector<TBlob> &inputs,
                                              const std::vector<OpReqType> &req,
                                              const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
      BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }
  /*! \brief Sparse backward not using forward inputs: grad_i = OP_i(ograd),
   *         valid only when the op maps a zero input to a zero output. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                       const OpContext &ctx,
                                       const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
    CHECK_EQ(inputs.size(), 1U);   // output grad
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto in_stype = inputs[0].storage_type();
    const auto lhs_stype = outputs[0].storage_type();
    const auto rhs_stype = outputs[1].storage_type();
    // lhs grad
    if (req[0] != kNullOp) {
      if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
    // rhs grad
    if (req[1] != kNullOp) {
      if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        // NOTE(review): this checks outputs[0] in the rhs branch; outputs[1]
        // looks intended (the surrounding condition already pins rhs_stype).
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> _, rsp. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
  }
  /*! \brief Dense backward using forward inputs; dispatches on output dtype. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<TBlob> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }
  /*! \brief BackwardUseIn with the half2 vectorized type switch. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
                                            const OpContext &ctx,
                                            const std::vector<TBlob> &inputs,
                                            const std::vector<OpReqType> &req,
                                            const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
      BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }
  /*! \brief Sparse backward using forward inputs (inputs = {ograd, lhs, rhs}).
   *         NOTE(review): the two storage-type branches are not mutually
   *         exclusive and there is no fallback branch — confirm coverage. */
  template<
    typename xpu, typename LOP, typename ROP,
    bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
  static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto out_grad_stype = inputs[0].storage_type();
    const auto lhs_grad_stype = outputs[0].storage_type();
    const auto rhs_grad_stype = outputs[1].storage_type();
    if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
        (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
        (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
      // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
      RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
        attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
    }
    if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
         (lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
        out_grad_stype == kDefaultStorage) {
      // dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
      DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
    }
  }
};  // class ElemwiseBinaryOp
/*! \brief Binary launch: registers a 2-input / 1-output elementwise operator with
 *  shape/type inference and in-place options for both inputs. */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
  NNVM_REGISTER_OP(name) \
  .set_num_inputs(2) \
  .set_num_outputs(1) \
  .set_attr<nnvm::FListInputNames>("FListInputNames", \
    [](const NodeAttrs& attrs) { \
      return std::vector<std::string>{"lhs", "rhs"}; \
    }) \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption", \
    [](const NodeAttrs& attrs){ \
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
    }) \
  .add_argument("lhs", "NDArray-or-Symbol", "first input") \
  .add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
  MXNET_OPERATOR_REGISTER_BINARY(__name$) \
  .set_attr<FInferStorageType>("FInferStorageType", \
    ElemwiseStorageType<2, 1, true, true, true>) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */ \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
    when inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
  MXNET_OPERATOR_REGISTER_BINARY(__name$) \
  .set_attr<FInferStorageType>("FInferStorageType", \
    ElemwiseBinaryOp::PreferSparseStorageType) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */ \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result
 *  FInferStorageType attr is not set using this macro.
 *  By default DefaultStorageType is used.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
  MXNET_OPERATOR_REGISTER_BINARY(__name$) \
  .set_attr<FInferStorageType>("FInferStorageType", \
    ElemwiseBinaryOp::SparseSparseWithDenseResult) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
  MXNET_OPERATOR_REGISTER_BINARY(__name$) \
  .set_attr<FInferStorageType>("FInferStorageType", \
    ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */ \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
sha256.c | /*
* Copyright (c) 2010, Michal Tomlein
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Original code by Angel Marin, Paul Johnston.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "sha256.h"
/*
 * uint32_a: a simple growable array of 32-bit words, used to hold the padded
 * message and the SHA-256 message schedule.
 */
typedef struct {
    uint32_t * data;      /* heap-allocated word buffer (owned by the array) */
    unsigned int length;  /* number of valid words in data */
} uint32_a;

/* Allocate a zero-initialized array of `length` words.
 * On allocation failure, data is NULL and length is 0. */
void uint32_a_init(uint32_a * a, unsigned int length) {
    a->data = (uint32_t *)calloc(length, sizeof(uint32_t));
    a->length = (a->data != NULL) ? length : 0;
}

/* Grow or shrink the array to `length` words; new words are zeroed.
 * On realloc failure the old buffer and length are left untouched
 * (the original code overwrote a->data with the realloc result, leaking
 * the old buffer and losing the data on failure). */
void uint32_a_resize(uint32_a * a, unsigned int length) {
    if (a->length == length)
        return;
    uint32_t * grown = (uint32_t *)realloc(a->data, length * sizeof(uint32_t));
    if (grown == NULL && length != 0)
        return;  /* keep the existing, still-valid buffer */
    a->data = grown;
    for (unsigned int i = a->length; i < length; i++)
        a->data[i] = 0;
    a->length = length;
}

/* Release the buffer and reset to an empty array (safe to free twice). */
void uint32_a_free(uint32_a * a) {
    free(a->data);
    a->data = NULL;
    a->length = 0;
}

/* Return 1 if both arrays have the same length and contents, else 0. */
int uint32_a_eq(uint32_a a, uint32_a b) {
    if (a.length != b.length)
        return 0;
    /* unsigned index: length is unsigned, avoids signed/unsigned comparison */
    for (unsigned int i = 0; i < a.length; ++i)
        if (a.data[i] != b.data[i])
            return 0;
    return 1;
}

/* Pack `length` bytes big-endian into 32-bit words (8 bits per byte).
 * A zero-length input returns an empty array; the original expression
 * ((length - 1) >> 5) + 1 underflowed for length == 0 and requested a
 * huge allocation. */
uint32_a bytes_to_binb(const char * bytes, unsigned int length) {
    uint32_t mask = (1 << 8) - 1;
    uint32_t bits = (uint32_t)length * 8;
    uint32_a bin;
    uint32_a_init(&bin, bits ? ((bits - 1) >> 5) + 1 : 0);
    if (bin.data == NULL)
        return bin;  /* empty input or allocation failure */
    for (uint32_t i = 0; i < bits; i += 8) {
        bin.data[i >> 5] |= (uint32_t)(bytes[i / 8] & mask) << (32 - 8 - i % 32);
    }
    return bin;
}
/* Render `length` big-endian 32-bit words as a lowercase hex string.
 * If `result` is NULL a buffer of 8*length + 1 bytes is allocated (caller
 * frees); otherwise `result` must hold at least 8*length + 1 bytes.
 * Returns `result`, or NULL on allocation failure.
 * The original hard-coded a 65-byte buffer and wrote the terminator at
 * index 64, which is only correct for length == 8 (a SHA-256 digest);
 * any other length left the string unterminated garbage. */
char * binb_to_hex(const uint32_t binarray[], uint32_t length, char * result) {
    const char * hex_tab = "0123456789abcdef";
    uint32_t nbytes = length * 4;  /* each word yields 4 bytes = 8 hex chars */
    if (!result) {
        result = (char *)malloc(2 * (size_t)nbytes + 1);
        if (!result)
            return NULL;
    }
    result[2 * nbytes] = '\0';
    for (uint32_t i = 0; i < nbytes; i++) {
        /* byte i lives in word i>>2; within the word, byte 0 is the
         * most-significant (big-endian), hence the (3 - i % 4) shift */
        result[2 * i] = hex_tab[(binarray[i >> 2] >> ((3 - i % 4) * 8 + 4)) & 0xF];
        result[2 * i + 1] = hex_tab[(binarray[i >> 2] >> ((3 - i % 4) * 8)) & 0xF];
    }
    return result;
}
/* Rotate x right by n bits (32-bit rotate).
 * For n outside (0, 32) the value is returned unchanged; this also removes
 * the undefined behavior the original formula had at n == 0 (x << 32) and
 * for negative n (shift by a negative count). All SHA-256 call sites use
 * n in 2..25, where behavior is identical to the original. */
uint32_t rotr(uint32_t x, int n) {
    if (n <= 0 || n >= 32)
        return x;
    return (x >> n) | (x << (32 - n));
}

/* SHA-256 Ch: for each bit, select y where x is 1, else z (FIPS 180-4). */
uint32_t ch(uint32_t x, uint32_t y, uint32_t z) {
    return (x & y) ^ (~x & z);
}

/* SHA-256 Maj: each result bit is the majority vote of x, y, z. */
uint32_t maj(uint32_t x, uint32_t y, uint32_t z) {
    return (x & y) ^ (x & z) ^ (y & z);
}

/* SHA-256 big-sigma functions (applied to the working variables a and e). */
uint32_t sigma0(uint32_t x) {
    return rotr(x, 2) ^ rotr(x, 13) ^ rotr(x, 22);
}

uint32_t sigma1(uint32_t x) {
    return rotr(x, 6) ^ rotr(x, 11) ^ rotr(x, 25);
}

/* SHA-256 small-sigma functions (used in the message-schedule expansion). */
uint32_t gamma0(uint32_t x) {
    return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3);
}

uint32_t gamma1(uint32_t x) {
    return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10);
}
/*
 * SHA-256 compression over a complete message.
 * message_a: packed big-endian message words from bytes_to_binb(); this
 *            function TAKES OWNERSHIP of it (it is resized and freed here).
 * source_binlength: message length in bits. Only a 32-bit length is stored
 *            in the padding, so messages must be shorter than 2^32 bits.
 * H[8]:      receives the final hash state (big-endian words).
 * Only byte-aligned messages are handled correctly: the padding bit below
 * assumes source_binlength % 32 is in {0, 8, 16, 24}.
 */
void sha256core(uint32_a message_a, uint32_t source_binlength, uint32_t H[]) {
/* Round constants K[0..63] (FIPS 180-4). */
uint32_t K[] = {
0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B,
0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01,
0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7,
0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152,
0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147,
0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC,
0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819,
0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08,
0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F,
0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2
};
/* Initial hash values H0..H7 (FIPS 180-4). */
H[0] = 0x6A09E667; H[1] = 0xBB67AE85; H[2] = 0x3C6EF372; H[3] = 0xA54FF53A;
H[4] = 0x510E527F; H[5] = 0x9B05688C; H[6] = 0x1F83D9AB; H[7] = 0x5BE0CD19;
uint32_a W_a;
uint32_a_init(&W_a, 64); /* message schedule: 64 words per 512-bit block */
uint32_t * W = W_a.data;
uint32_t a, b, c, d, e, f, g, h;
uint32_t T1, T2;
/* Padded length in 32-bit words: room for the message, the mandatory 1 bit
 * and the 64-bit length field, rounded up to whole 512-bit (16-word)
 * blocks. (Correct for byte-aligned inputs, where len+65 can never be an
 * exact multiple of 512.) */
uint32_t message_length = (((source_binlength + 1 + 64) >> 9) << 4) + 16;
uint32_a_resize(&message_a, message_length);
uint32_t * message = message_a.data;
/* Append the mandatory '1' padding bit right after the message. */
message[source_binlength >> 5] |= 0x80 << (24 - source_binlength % 32);
/* Bit length goes in the very last word; the word before it (the high half
 * of the 64-bit length field) stays zero, hence the < 2^32-bit limit. */
message[message_length - 1] = source_binlength;
for (uint32_t i = 0; i < message_length; i += 16) { /* one block per pass */
a = H[0]; b = H[1]; c = H[2]; d = H[3]; e = H[4]; f = H[5]; g = H[6]; h = H[7];
for (uint32_t t = 0; t < 64; t++) {
/* First 16 schedule words come straight from the block; the rest are
 * expanded with the gamma functions. */
if (t < 16) W[t] = message[t + i];
else W[t] = gamma1(W[t - 2]) + W[t - 7] + gamma0(W[t - 15]) + W[t - 16];
T1 = h + sigma1(e) + ch(e, f, g) + K[t] + W[t];
T2 = sigma0(a) + maj(a, b, c);
h = g; g = f; f = e; e = d + T1; d = c; c = b; b = a; a = T1 + T2;
}
H[0] += a; H[1] += b; H[2] += c; H[3] += d; H[4] += e; H[5] += f; H[6] += g; H[7] += h;
}
uint32_a_free(&message_a); /* consumes the caller's buffer (see above) */
uint32_a_free(&W_a);
}
char * sha256(const char * source, unsigned int length, char * result) {
uint32_t H[8];
sha256core(bytes_to_binb(source, length), length * 8, H);
return binb_to_hex(H, 8, result);
}
/* Brute-force a SHA-256 preimage: try every string over `character_set` with
 * length in [min_length, max_length] and return a malloc'd match (caller
 * frees) or NULL.
 * FIXES vs the original:
 *  - `str_length` was omitted from the OpenMP private list and therefore
 *    shared across threads (data race); all per-iteration work variables
 *    are now declared inside the parallel region / loop body.
 *  - `result`/`done` were written without synchronization when two threads
 *    found a match simultaneously; the winner is now chosen in a critical
 *    section (first finder wins, no leaked duplicate allocations).
 *  - max_id is computed with integer arithmetic instead of pow(), avoiding
 *    floating-point rounding (it can still overflow int64_t for very large
 *    search spaces, as before). */
char * reverse_sha256(const char * hash, int min_length, int max_length, const char * character_set) {
    int character_set_length = strlen(character_set);
    int64_t max_id = 1;
    for (int p = 0; p < max_length; ++p)
        max_id *= character_set_length;
    int done = 0;
    char * result = NULL;
    #pragma omp parallel
    {
        char * str_hash = (char *)malloc(65 * sizeof(char));
        char * str = (char *)malloc((max_length + 1) * sizeof(char));
        str[max_length] = '\0';
        int64_t id;
        #pragma omp for schedule(dynamic)
        for (id = 0; id < max_id; ++id) {
            #pragma omp flush (done)
            if (done) continue;
            /* number of "significant" characters encoded by this id */
            int str_length = id == 0 ? 0 : (int)(floor(log(id) / log(character_set_length)) + 1);
            int t = 2 * max_length - str_length - 1;
            int i;
            /* left-pad with the first alphabet character ... */
            for (i = 0; i < max_length - str_length; ++i)
                str[i] = character_set[0];
            /* ... then decode id in base character_set_length */
            for (int64_t remainder = id; remainder && i < max_length; ++i) {
                str[t - i] = character_set[remainder % character_set_length];
                remainder /= character_set_length;
            }
            /* every suffix of str with length >= min_length is a candidate */
            int range = max_length - (str_length > min_length ? str_length : min_length);
            for (i = 0; i <= range; ++i) {
                if (strcmp(hash, sha256(str + i, max_length - i, str_hash)) == 0) {
                    #pragma omp critical
                    {
                        if (!done) { /* first finder wins */
                            result = (char *)malloc((max_length - i + 1) * sizeof(char));
                            memcpy(result, str + i, max_length - i);
                            result[max_length - i] = '\0';
                            done = 1;
                        }
                    }
                    #pragma omp flush (done)
                    break;
                }
            }
        }
        free(str_hash);
        free(str);
    } /* end parallel */
    return result;
}
|
chi_calc.c | ////////////////////////////////////////////////////////
// Chi Square Calculation and Trapezoidal Integration //
// Matheus J. Castro //
// Version 2.3 //
// Last Modification: 06/11/2021 (month/day/year) //
////////////////////////////////////////////////////////
#include<stdbool.h>
#include<stdlib.h>
#include<string.h>
#include<stdio.h>
#include<time.h>
#include<math.h>
#include<omp.h>
// Buffer size for file names and header fields read from the catalog.
#define MAX 100
#define c 299792458 // speed of light in m/s
double e_func(double z, double param[3], double omega_k){
double omega_m_factor = param[0] * pow((1 + z), 3);
double omega_de_factor = param[1] * pow((1 + z), (3 + 3 * param[2]));
double omega_k_factor = omega_k * pow((1 + z), 2);
double sq = 1/sqrt(omega_m_factor + omega_de_factor + omega_k_factor);
return sq;
}
/* Sum of the integrand at the midpoints newly introduced at this refinement
 * level (step h = (b-a)/n, sampling every 2h starting at a+h), scaled by h.
 * Used by integrate() to refine its trapezoid estimate without
 * re-evaluating previously sampled points. */
double calc_trapezium_formula(double lim[2], double n, double params[3], double omega_k){
    double step = (lim[1] - lim[0]) / n;
    double acc = 0;
    for (double x = lim[0] + step; x < lim[1]; x += 2 * step)
        acc += e_func(x, params, omega_k);
    return step * acc;
}
/* Adaptive trapezoid integration of 1/E(z) over [lim[0], lim[1]]: the
 * subinterval count doubles each pass until the relative change of the
 * estimate drops below eps_desired. Returns 0 for an empty interval. */
double integrate(double params[3], double omega_k, double lim[2], double eps_desired){
    if (lim[0] == lim[1])
        return 0;
    /* level 0: plain trapezoid on the two endpoints */
    double estimate = (e_func(lim[0], params, omega_k) + e_func(lim[1], params, omega_k))
                      * (lim[1] - lim[0]) / 2;
    double rel_change = 1;
    for (int level = 1; rel_change >= eps_desired; ++level) {
        double previous = estimate;
        /* halve the old estimate and add the new midpoint contributions */
        estimate = estimate / 2 + calc_trapezium_formula(lim, pow(2, level), params, omega_k);
        rel_change = fabs((estimate - previous) / estimate);
    }
    return estimate;
}
/* Comoving distance for the given cosmology (same length unit as c/h0, i.e.
 * meters when h0 is in 1/s). Handles flat, open (sinh) and closed (sin)
 * curvature.
 * FIX: the flat branch re-ran the (expensive, adaptive) integration a second
 * time even though the result was already computed; it now reuses
 * integration_result. Behavior is unchanged — the integrator is
 * deterministic, so both calls returned the same value. */
double comoving_distance(double h0, double redshift, double params[3], double precision){
    double omega_k = 1 - (params[0] + params[1]);
    double hubble_distance = c / h0;
    double lim[2] = {0, redshift};
    double integration_result = integrate(params, omega_k, lim, precision);
    double factor_k;
    if(omega_k == 0){
        factor_k = hubble_distance * integration_result;
    }
    else if(omega_k > 0){
        double sqr_om = sqrt(omega_k);
        factor_k = hubble_distance / sqr_om * sinh(sqr_om * integration_result);
    }
    else{
        double sqr_om = sqrt(fabs(omega_k));
        factor_k = hubble_distance / sqr_om * sin(sqr_om * integration_result);
    }
    return factor_k;
}
/* Luminosity distance d_L = (1+z) * comoving distance. */
double luminosity_distance(double h0, double redshift, double params[3], double precision){
    return (1 + redshift) * comoving_distance(h0, redshift, params, precision);
}
double dist_mod(double dist_lum){
//dist_lum is the luminosity distance in Mpc
double mod = 5 * log10(dist_lum * pow(10, 6) / 10);
return mod;
}
/* Distance modulus for H0 given in km/s/Mpc: converts H0 to 1/s, gets the
 * luminosity distance in meters, converts it to Mpc, and applies dist_mod. */
double lumin_dist_mod_func(double h0, double redshift, double params[3], double precision){
    double mpc_to_km = 3.086E+19; /* km per Mpc */
    double lum_dist_val = luminosity_distance(h0 / mpc_to_km, redshift, params, precision);
    lum_dist_val = lum_dist_val * pow(10, -3) / mpc_to_km; /* m -> km -> Mpc */
    return dist_mod(lum_dist_val);
}
/*
 * Chi-square of the model distance moduli against the catalog.
 * data[0] holds redshifts (it is fed to lumin_dist_mod_func's redshift
 * parameter), data[1] the observed moduli and data[2] their uncertainties —
 * column meaning inferred from usage; confirm against the catalog format.
 * NOTE(review): h0 arrives as `int` although callers hold it as double, so
 * fractional H0 values would be truncated — confirm this is intended.
 */
double calc_chi(int h0, int nrows, double data[3][nrows], double params[3], double precision){
double chi2 = 0;
#pragma omp parallel shared(h0, nrows, data, params, precision)
{
//if(omp_get_thread_num() == 0){
// printf("Executing for %d thread(s).\n", omp_get_num_threads());
//}
/* Rows are independent; per-thread partial sums are combined by the
 * reduction clause. */
#pragma omp for reduction(+: chi2)
for(int i = 0; i < nrows; i++){
double teor_data = lumin_dist_mod_func(h0, data[0][i], params, precision);
chi2 += pow((data[1][i] - teor_data) / data[2][i], 2);
}
}
return chi2;
}
/* Read `nrows` (z, mu, sigma) rows from fl_name — skipping one header
 * line — and return the chi^2 of the model against them.
 * FIXES: the original never checked fopen() (NULL deref on a missing file;
 * it now reports the error and returns NAN) and read header tokens with an
 * unbounded %s into MAX(=100)-byte buffers (now width-limited to 99). */
double main_execution(char fl_name[MAX], double h0, double omega_m, double omega_ee, double w, double precision, int nrows){
    FILE *csv = fopen(fl_name, "r");
    if (csv == NULL) {
        fprintf(stderr, "Could not open %s\n", fl_name);
        return NAN;
    }
    char header[4][MAX];
    double data[3][nrows];
    /* Discard the one-line header (field widths bounded by MAX-1 = 99). */
    if(fscanf(csv, "%99s %99s %99s %99s\n", header[0], header[1], header[2], header[3])){}
    for (int i = 0; i < nrows ; i++) // Read until the last line.
    {
        if(fscanf(csv, "%lf %lf %lf\n", &data[0][i], &data[1][i], &data[2][i])){} // Create the matrix of catalogs.
    }
    fclose(csv);
    double params[3] = {omega_m, omega_ee, w};
    return calc_chi(h0, nrows, data, params, precision);
}
/* Entry point: evaluate and print the chi^2 of a fiducial flat LambdaCDM
 * model (H0 = 70, Om = 0.3, Ode = 0.7, w = -1) against fake_data.cat. */
int main(){
    char fl_name[MAX] = "fake_data.cat";
    const double h0 = 70;        /* km/s/Mpc */
    const double omega_m = 0.3;
    const double omega_ee = 0.7;
    const double w = -1;
    const double precision = 1E-10;
    const int nrows = 580;
    printf("%e\n", main_execution(fl_name, h0, omega_m, omega_ee, w, precision, nrows));
    return 0;
}
|
GB_binop__isge_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale): GB (_AxD__isge_uint64)
// D*A function (rowscale): GB (_DxB__isge_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B GB (_bind1st__isge_uint64)
// C=scalar+B' GB (_bind1st_tran__isge_uint64)
// C=A+scalar GB (_bind2nd__isge_uint64)
// C=A'+scalar GB (_bind2nd_tran__isge_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij >= bij)
// Type/operator plumbing consumed by the kernel templates #include'd below.
// This file is auto-generated: fix the Generator sources, not this file.
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
// accessor for the output array C->x
#define GB_CX(p) Cx [p]
// binary operator: ISGE returns 1 when x >= y, else 0 (result type uint64_t)
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: the generator only emits this kernel for the accumulable
// ops listed below, not for ISGE, hence the "#if 0".
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated wrapper: the loop body is the #include'd template,
// specialized through the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper; the kernel comes from the #include'd template and
// is parallelized over the precomputed B_ek_slicing task partition.
GrB_Info GB (_Cdense_accumB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper around the scalar-subassign template.
GrB_Info GB (_Cdense_accumb__isge_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the return inside the braces above always
// fires first. Harmless generator artifact; any fix belongs in Generator/.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper; the kernel is the #include'd colscale template.
GrB_Info GB (_AxD__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated wrapper; the kernel is the #include'd rowscale template.
GrB_Info GB (_DxB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Auto-generated wrapper around the shared eWiseAdd template.
GrB_Info GB (_AaddB__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here; the template uses it and GB_FREE_WORK frees it
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Auto-generated wrapper around the general eWiseMult meta-template.
GrB_Info GB (_AemultB_01__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated wrapper; the kernel loop is the #include'd template.
GrB_Info GB (_AemultB_02__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (GB_BINOP_FLIP is 0 for ISGE above, so only this branch is compiled.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated wrapper; the kernel is the #include'd template.
GrB_Info GB (_AemultB_03__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated wrapper; the kernel is the #include'd bitmap template.
GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries that the Bb bitmap test reports as absent
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ; // cij = (x >= bij): ISGE with x bound first
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries that the Ab bitmap test reports as absent
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ; // cij = (aij >= y): ISGE with y bound second
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (generator artifact: for this
// uint64 operator it re-defines the identical type)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
// Auto-generated wrapper; the transpose kernel is the #include'd file, which
// applies GB_CAST_OP above to every entry.
GrB_Info GB (_bind2nd_tran__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2 // timed repetitions; the minimum time is the reported result
// NOTE(review): a and b are unparenthesized in the expansions below — fine
// for the simple uses in this file, but fragile for expression arguments.
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: *y is used as scratch and is modified by the borrow/normalize steps.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from seconds so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Normalize an oversized microsecond difference the other way. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocate the two time planes A[0]/A[1] plus roc2, run the order-4
 * (25-point) wave stencil TESTS times, and report the best run.
 * FIXES vs the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few argv entries were
 *    given (UB); they now have defaults and argv only overrides them.
 *  - roc2 was first malloc'd with a bogus size and then leaked when the
 *    pointer was overwritten; it is now allocated once.
 *  - the init loops started at 1 and never touched A[1], leaving the
 *    index-0 planes and the whole A[1] grid uninitialized even though the
 *    stencil reads them (UB; values are random anyway, so this only changes
 *    which pseudo-random field is timed).
 *  - tile_size is freed before exit.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx = 40, Ny = 40, Nz = 40; /* defaults: 32^3 interior + 8 halo */
    int Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 32;
    tile_size[3] = 128;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize every grid point, including the halo planes the stencil reads
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i ][j ][k ] +
                            coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                                   A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                                   A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                            coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                                   A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                                   A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                            coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                                   A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                                   A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                            coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                                   A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                                   A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    free(roc2);
    free(tile_size);
    return 0;
}
|
heap_mult_generic.h | #ifndef MASKED_SPGEMM_HEAP_MULT_GENERIC_H
#define MASKED_SPGEMM_HEAP_MULT_GENERIC_H
#include <algorithm>
#include <numeric>
#include "CSR.h"
// TODO: move to a separate file
namespace tmp {
/**
 ** Count flop of SpGEMM between A and B in CSR format.
 ** Returns 2 * (number of scalar multiplications) and fills maxnnzc[i]
 ** with the per-row upper bound on nnz(C(i,:)).
 **/
template<typename IT, typename NT>
long long int getFlop(const CSR<IT, NT> &A, const CSR<IT, NT> &B, IT *maxnnzc) {
    long long int flop = 0; // total multiplications needed to generate C
#pragma omp parallel for reduction(+:flop)
    for (IT i = 0; i < A.rows; ++i) {
        long long int rowFlop = 0;
        for (IT k = A.rowptr[i]; k < A.rowptr[i + 1]; ++k) {
            const IT bRow = A.colids[k];
            rowFlop += B.rowptr[bRow + 1] - B.rowptr[bRow];
        }
        maxnnzc[i] = rowFlop;
        flop += rowFlop;
    }
    return flop * 2;
}
}
namespace heap {
// Thin wrappers over the std heap algorithms, phrased as (base, live size).
template<class Iter, class Size>
[[gnu::always_inline]]
inline void make(Iter base, Size n) {
    std::make_heap(base, base + n);
}
// Remove the root; the caller's live size is decremented in place.
template<class Iter, class Size>
[[gnu::always_inline]]
inline void pop(Iter base, Size &n) {
    std::pop_heap(base, base + n);
    --n;
}
// Restore the heap invariant after the root's key has been changed.
template<class Iter, class Size>
[[gnu::always_inline]]
inline void sinkRoot(Iter base, Size n) {
    std::pop_heap(base, base + n);
    std::push_heap(base, base + n);
}
}
namespace rowAlg {
// Shared pieces of the unmasked heap SpGEMM row algorithms.
struct HeapBase {
    const static bool masked = false;
    // Upper bound on nnz(C) over rows [rowBeginIdx, rowEndIdx): the plain
    // sum of the per-row flop bounds.
    // FIX: the accumulator is seeded with IT{0}; the original seeded
    // std::accumulate with the int literal 0, so the sum was computed in
    // int and could overflow/truncate when IT is a 64-bit index type.
    template<class IT, class NT>
    static IT
    estimateResultSize(IT rowBeginIdx, IT rowEndIdx, IT *maxnnzc,
                       const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M) {
        return std::accumulate(maxnnzc + rowBeginIdx, maxnnzc + rowEndIdx, IT{0});
    }
    // One heap entry per nonzero of the widest A row handled by this thread.
    template<class IT, class NT>
    static HeapEntry<IT, void> *allocateAuxiliaryMemory(IT rowBeginIdx, IT rowEndIdx, IT *maxnnzc,
                                                        const CSR<IT, NT> &A, const CSR<IT, NT> &B,
                                                        const CSR<IT, NT> &M) {
        IT threadHeapSize = 0;
        for (IT i = rowBeginIdx; i < rowEndIdx; ++i) {
            IT rownnz = A.rowptr[i + 1] - A.rowptr[i];
            if (rownnz > threadHeapSize) { threadHeapSize = rownnz; }
        }
        return my_malloc<HeapEntry<IT, void>>(threadHeapSize);
    };
};
// Unmasked heap-based SpGEMM row kernel: a k-way merge of the rows of B
// selected by A(i,:), driven by a heap of per-row cursors.
struct BasicHeap : HeapBase {
// Compute one output row C(i,:) = A(i,:)*B. Results are appended through
// prevColIdC / prevValueC, which are pre-increment write cursors (they point
// at the last element written, not one past it). rowNvals[i] and
// threadNvals receive the number of entries produced.
// NOTE(review): in-order emission (and the duplicate-merge test below)
// relies on HeapEntry's comparison — defined elsewhere — placing the next
// column id to emit at the heap root; confirm in HeapEntry's operator<.
template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
[[gnu::always_inline]]
static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
MultiplyOperation multop, AddOperation addop, IT i,
IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, HeapEntry<IT, void> *mergeheap,
IT &threadNvals) {
// Make initial heap for the row
IT currRowNvals = 0;
IT hsize = 0;
for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column
IT inner = A.colids[j]; // get the col id of A (or row id of B)
IT npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nzs in B's row
if (npins == 0) { continue; }
mergeheap[hsize].loc = B.rowptr[inner];
mergeheap[hsize].runr = j; // the pointer to A.colid's is the run-rank
mergeheap[hsize++].key = B.colids[B.rowptr[inner]]; // B's first colid is the first key
}
heap::make(mergeheap, hsize);
// Traverse the heaps
while (hsize > 0) {
auto &hentry = mergeheap[0];
NT value = multop(A.values[hentry.runr], B.values[hentry.loc]);
// Use short circuiting
if ((currRowNvals > 0) && *prevColIdC == hentry.key) {
// same column as the last emitted entry: accumulate into it
*prevValueC = addop(value, *prevValueC);
} else {
// new column: advance both cursors and start a fresh entry
*(++prevValueC) = value;
*(++prevColIdC) = hentry.key;
currRowNvals++;
}
IT inner = A.colids[hentry.runr];
// If still unused nonzeros exists in A(:,colind), insert the next nonzero to the heap
if (++hentry.loc < B.rowptr[inner + 1]) {
hentry.key = B.colids[hentry.loc];
heap::sinkRoot(mergeheap, hsize);
} else {
heap::pop(mergeheap, hsize);
}
}
rowNvals[i] = currRowNvals;
threadNvals += currRowNvals;
}
};
// Hybrid variant of BasicHeap: while more than `threshold` cursors are live
// it maintains a real heap; at or below the threshold it switches to a
// linear std::max_element scan of the remaining entries, which is cheaper
// for tiny merges. Same output as BasicHeap.
template<size_t threshold>
struct HeapLinear : HeapBase {
// Compute one output row C(i,:); see BasicHeap::row for the cursor and
// rowNvals/threadNvals conventions (identical here).
template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
[[gnu::always_inline]]
static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
MultiplyOperation multop, AddOperation addop, IT i,
IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, HeapEntry<IT, void> *mergeheap,
IT &threadNvals) {
// Make initial heap for the row
IT currRowNvals = 0;
IT hsize = 0;
for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column
IT inner = A.colids[j]; // get the col id of A (or row id of B)
IT npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nzs in B's row
if (npins == 0) { continue; }
mergeheap[hsize].loc = B.rowptr[inner];
mergeheap[hsize].runr = j; // the pointer to A.colid's is the run-rank
mergeheap[hsize++].key = B.colids[B.rowptr[inner]]; // B's first colid is the first key
}
// heapify only when above the linear-scan threshold
if (hsize > threshold) { heap::make(mergeheap, hsize); }
// Traverse the heaps
while (hsize > 0) {
// heap mode: next entry is the root; linear mode: scan for it
IT idx = hsize > threshold ? 0 : std::max_element(mergeheap, mergeheap + hsize) - mergeheap;
auto &hentry = mergeheap[idx];
NT value = multop(A.values[hentry.runr], B.values[hentry.loc]);
// Use short circuiting
if ((currRowNvals > 0) && *prevColIdC == hentry.key) {
*prevValueC = addop(value, *prevValueC);
} else {
*(++prevValueC) = value;
*(++prevColIdC) = hentry.key;
currRowNvals++;
}
IT inner = A.colids[hentry.runr];
// If still unused nonzeros exists in A(:,colind), insert the next nonzero to the heap
if (++hentry.loc < B.rowptr[inner + 1]) {
hentry.key = B.colids[hentry.loc];
if (hsize > threshold) {
heap::sinkRoot(mergeheap, hsize);
}
} else {
if (hsize > threshold) {
heap::pop(mergeheap, hsize);
} else {
// linear mode: swap-remove the exhausted cursor
*(mergeheap + idx) = *(mergeheap + --hsize);
}
}
}
rowNvals[i] = currRowNvals;
threadNvals += currRowNvals;
}
};
// Shared pieces of the masked heap SpGEMM row algorithms.
struct MaskedHeapBase {
    const static bool masked = true;
    // Upper bound on nnz(C) when a mask M is applied: each row contributes
    // at most min(flop bound, nnz(M(row,:))) entries.
    template<class IT, class NT>
    static IT estimateResultSize(IT rowBeginIdx, IT rowEndIdx, IT *maxnnzc,
                                 const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M) {
        IT bound = 0;
        for (IT r = rowBeginIdx; r < rowEndIdx; ++r) {
            IT maskRowNnz = M.rowptr[r + 1] - M.rowptr[r];
            bound += std::min(maxnnzc[r], maskRowNnz);
        }
        return bound;
    }
    // One heap entry per nonzero of the widest A row handled by this thread.
    template<class IT, class NT>
    static HeapEntry<IT, void> *allocateAuxiliaryMemory(IT rowBeginIdx, IT rowEndIdx, IT *maxnnzc,
                                                        const CSR<IT, NT> &A, const CSR<IT, NT> &B,
                                                        const CSR<IT, NT> &M) {
        IT widest = 0;
        for (IT r = rowBeginIdx; r < rowEndIdx; ++r) {
            IT rowNnz = A.rowptr[r + 1] - A.rowptr[r];
            widest = std::max(widest, rowNnz);
        }
        return my_malloc<HeapEntry<IT, void>>(widest);
    };
};
// Masked heap SpGEMM row kernel, baseline variant: merge the B rows selected
// by A's row i using a heap keyed on column id, keeping only entries whose
// column id also appears in the mask row M(i,:).
struct MaskedHeap_v0 : MaskedHeapBase {
    // Computes row i of C = (A*B) .* M.
    //   multop/addop           : semiring multiply and add
    //   rowNvals[i]            : receives the nonzero count produced for row i
    //   prevColIdC/prevValueC  : output cursors pointing at the LAST element
    //                            written (hence the pre-increment on writes)
    //   mergeheap              : scratch heap from allocateAuxiliaryMemory
    //   threadNvals            : running nonzero total for this thread
    template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
    [[gnu::always_inline]]
    static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, IT i,
                    IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, HeapEntry<IT, void> *mergeheap,
                    IT &threadNvals) {
        IT maskIdx = M.rowptr[i];
        IT maskEnd = M.rowptr[i + 1];
        // Make initial heap for the row: one entry per nonzero A(i,k) whose
        // corresponding B row is non-empty.
        IT currRowNvals = 0;
        IT hsize = 0;
        for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column
            IT inner = A.colids[j]; // get the col id of A (or row id of B)
            IT npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nzs in B's row
            if (npins == 0) { continue; }
            mergeheap[hsize].loc = B.rowptr[inner];
            mergeheap[hsize].runr = j; // the pointer to A.colid's is the run-rank
            mergeheap[hsize++].key = B.colids[B.rowptr[inner]]; // B's first colid is the first key
        }
        heap::make(mergeheap, hsize);
        // Traverse the heaps: pop entries in column order, filtering by mask.
        while (hsize > 0) {
            auto &hentry = mergeheap[0];
            // Advance the mask cursor past columns smaller than the root's key.
            while (maskIdx < maskEnd && hentry.key > M.colids[maskIdx]) { ++maskIdx; }
            // Mask row exhausted: nothing further can survive the mask.
            if (maskIdx >= maskEnd) { break; }
            if (hentry.key == M.colids[maskIdx]) {
                NT value = multop(A.values[hentry.runr], B.values[hentry.loc]);
                // Use short circuiting: merge with the previous output entry if
                // it has the same column id, otherwise start a new entry.
                if ((currRowNvals > 0) && *prevColIdC == hentry.key) {
                    *prevValueC = addop(value, *prevValueC);
                } else {
                    *(++prevValueC) = value;
                    *(++prevColIdC) = hentry.key;
                    currRowNvals++;
                }
            }
            IT inner = A.colids[hentry.runr];
            // If still unused nonzeros exists in A(:,colind), insert the next nonzero to the heap
            if (++hentry.loc < B.rowptr[inner + 1]) {
                hentry.key = B.colids[hentry.loc];
                heap::sinkRoot(mergeheap, hsize);
            } else {
                heap::pop(mergeheap, hsize);
            }
        }
        rowNvals[i] = currRowNvals;
        threadNvals += currRowNvals;
    }
};
// Masked heap row kernel, variant 1: like MaskedHeap_v0, but heap entries are
// pre-advanced past columns below the current mask position, both when the
// heap is built and before an entry is pushed back.
struct MaskedHeap_v1 : MaskedHeapBase {
    // Computes row i of C = (A*B) .* M; see MaskedHeap_v0::row for the
    // parameter conventions (output cursors point at the last written element).
    template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
    [[gnu::always_inline]]
    static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, IT i,
                    IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, HeapEntry<IT, void> *mergeheap,
                    IT &threadNvals) {
        IT maskIdx = M.rowptr[i];
        IT maskEnd = M.rowptr[i + 1];
        // Empty mask row: nothing can be produced (also guards the
        // M.colids[maskIdx] reads below).
        if (maskIdx == maskEnd) { return; }
        // Make initial heap for the row
        IT currRowNvals = 0;
        IT hsize = 0;
        for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column
            IT inner = A.colids[j]; // get the col id of A (or row id of B)
            IT npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nzs in B's row
            if (npins == 0) { continue; }
            mergeheap[hsize].loc = B.rowptr[inner];
            mergeheap[hsize].runr = j; // the pointer to A.colid's is the run-rank
            mergeheap[hsize].key = B.colids[B.rowptr[inner]]; // B's first colid is the first key
            // Skip columns that precede the first mask entry; they can never match.
            while (mergeheap[hsize].key < M.colids[maskIdx] && (mergeheap[hsize].loc + 1 < B.rowptr[inner + 1])) {
                mergeheap[hsize].loc++;
                mergeheap[hsize].key = B.colids[mergeheap[hsize].loc];
            }
            // If we did not reach the end of B's row, add it to the heap
            // NOTE(review): loc stops at most at the last element, so this
            // condition appears to always hold — confirm intent vs. `loc + 1`.
            if (mergeheap[hsize].loc < B.rowptr[inner + 1]) { hsize++; }
        }
        heap::make(mergeheap, hsize);
        // Traverse the heaps
        while (hsize > 0) {
            auto &hentry = mergeheap[0];
            while (maskIdx < maskEnd && hentry.key > M.colids[maskIdx]) { ++maskIdx; }
            if (maskIdx >= maskEnd) { break; }
            if (hentry.key == M.colids[maskIdx]) {
                NT value = multop(A.values[hentry.runr], B.values[hentry.loc]);
                // Use short circuiting: merge with the previous output entry if
                // it has the same column id, otherwise start a new one.
                if ((currRowNvals > 0) && *prevColIdC == hentry.key) {
                    *prevValueC = addop(value, *prevValueC);
                } else {
                    *(++prevValueC) = value;
                    *(++prevColIdC) = hentry.key;
                    currRowNvals++;
                }
            }
            IT inner = A.colids[hentry.runr];
            // Before pushing the entry back to the queue, remove elements that are < than current mask element
            while (++hentry.loc < B.rowptr[inner + 1]) {
                hentry.key = B.colids[hentry.loc];
                if (hentry.key >= M.colids[maskIdx]) { break; }
            }
            if (hentry.loc < B.rowptr[inner + 1]) {
                heap::sinkRoot(mergeheap, hsize);
            } else {
                heap::pop(mergeheap, hsize);
            }
        }
        rowNvals[i] = currRowNvals;
        threadNvals += currRowNvals;
    }
};
// Masked heap row kernel, variant 2: heap entries are advanced to the NEXT
// exact intersection with the mask row (a two-pointer merge per entry), so
// only matching columns ever reach the heap root.
struct MaskedHeap_v2 : MaskedHeapBase {
    // Computes row i of C = (A*B) .* M; see MaskedHeap_v0::row for the
    // parameter conventions (output cursors point at the last written element).
    template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
    [[gnu::always_inline]]
    static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, IT i,
                    IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, HeapEntry<IT, void> *mergeheap,
                    IT &threadNvals) {
        IT maskIdx = M.rowptr[i];
        IT maskEnd = M.rowptr[i + 1];
        // Empty mask row: nothing can survive the mask.
        if (maskIdx == maskEnd) { return; }
        // Make initial heap for the row
        IT currRowNvals = 0;
        IT hsize = 0;
        for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column
            IT inner = A.colids[j]; // get the col id of A (or row id of B)
            IT npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nzs in B's row
            if (npins == 0) { continue; }
            mergeheap[hsize].loc = B.rowptr[inner];
            mergeheap[hsize].runr = j; // the pointer to A.colid's is the run-rank
            mergeheap[hsize].key = B.colids[B.rowptr[inner]]; // B's first colid is the first key
            // Find the first match in the intersection of the mask column and the A column
            // (the entry is only added to the heap if a match exists).
            IT maskIdxCopy = maskIdx;
            while (true) {
                if (mergeheap[hsize].key < M.colids[maskIdx]) {
                    if (++mergeheap[hsize].loc < B.rowptr[inner + 1]) {
                        mergeheap[hsize].key = B.colids[mergeheap[hsize].loc];
                    } else {
                        break;
                    }
                } else if (mergeheap[hsize].key > M.colids[maskIdx]) {
                    if (++maskIdx == maskEnd) {
                        break;
                    }
                } else {
                    hsize++;
                    break;
                }
            }
            // Restore the mask cursor: other heap entries may still need
            // earlier mask positions.
            maskIdx = maskIdxCopy;
        }
        heap::make(mergeheap, hsize);
        // Traverse the heaps
        while (hsize > 0) {
            auto &hentry = mergeheap[0];
            while (maskIdx < maskEnd && hentry.key > M.colids[maskIdx]) { ++maskIdx; }
            if (maskIdx >= maskEnd) { break; }
            if (hentry.key == M.colids[maskIdx]) {
                NT value = multop(A.values[hentry.runr], B.values[hentry.loc]);
                // Use short circuiting: merge with the previous output entry if
                // it has the same column id, otherwise start a new one.
                if ((currRowNvals > 0) && *prevColIdC == hentry.key) {
                    *prevValueC = addop(value, *prevValueC);
                } else {
                    *(++prevValueC) = value;
                    *(++prevColIdC) = hentry.key;
                    currRowNvals++;
                }
            }
            IT inner = A.colids[hentry.runr];
            // Check if we are done with the current row from B, and if we are not move to the next element.
            if (++hentry.loc >= B.rowptr[inner + 1]) {
                heap::pop(mergeheap, hsize);
                continue;
            }
            hentry.key = B.colids[hentry.loc];
            // Find the first match in the intersection of
            // the mask column (starting with maskIdx) and the A column (starting with hentry.loc)
            IT maskIdxCopy = maskIdx;
            while (true) {
                if (hentry.key < M.colids[maskIdx]) {
                    if (++hentry.loc < B.rowptr[inner + 1]) {
                        hentry.key = B.colids[hentry.loc];
                    } else {
                        heap::pop(mergeheap, hsize);
                        break;
                    }
                } else if (hentry.key > M.colids[maskIdx]) {
                    if (++maskIdx == maskEnd) {
                        heap::pop(mergeheap, hsize);
                        break;
                    }
                } else {
                    // put the merge heap in the valid state again
                    heap::sinkRoot(mergeheap, hsize);
                    break;
                }
            }
            // Restore the mask cursor for the remaining heap entries.
            maskIdx = maskIdxCopy;
        }
        rowNvals[i] = currRowNvals;
        threadNvals += currRowNvals;
    }
};
// Shared helpers for the masked column-accumulator (MCA) row kernels.
struct MCABase {
    const static bool masked = true;

    // Output bound for rows [rowBeginIdx, rowEndIdx): each row of C can hold
    // at most min(flop-based estimate, number of mask entries in that row).
    template<class IT, class NT>
    static IT estimateResultSize(IT rowBeginIdx, IT rowEndIdx, IT *maxnnzc,
                                 const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M) {
        IT total = 0;
        for (IT r = rowBeginIdx; r < rowEndIdx; ++r) {
            total += std::min(maxnnzc[r], M.rowptr[r + 1] - M.rowptr[r]);
        }
        return total;
    }

    // The accumulator needs one flag per mask entry; size the allocation for
    // the widest mask row this thread will process.
    template<class IT, class NT>
    static bool *allocateAuxiliaryMemory(IT rowBeginIdx, IT rowEndIdx, IT *maxnnzc,
                                         const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M) {
        IT widestMaskRow = 0;
        for (IT r = rowBeginIdx; r < rowEndIdx; ++r) {
            const IT maskRowNnz = M.rowptr[r + 1] - M.rowptr[r];
            if (widestMaskRow < maskRowNnz) { widestMaskRow = maskRowNnz; }
        }
        return my_malloc<bool>(widestMaskRow);
    }
};
// Masked column-accumulator row kernel, variant 1: for each nonzero A(i,k),
// intersect B(k,:) with the mask row M(i,:) by a two-pointer merge and
// accumulate matching products into a dense buffer indexed by mask position.
struct MCA_v1 : MCABase {
    const static bool masked = true;
    // Computes row i of C = (A*B) .* M.
    //   rowNvals[i]           : receives the nonzero count for row i
    //   prevColIdC/prevValueC : output cursors pointing at the LAST written
    //                           element (restored to that invariant on exit)
    //   flags                 : scratch, one bool per mask entry
    template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
    [[gnu::always_inline]]
    static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, IT i,
                    IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, bool *flags,
                    IT &threadNvals) {
        IT maskBegin = M.rowptr[i];
        const IT maskEnd = M.rowptr[i + 1];
        const IT maskSize = maskEnd - maskBegin;
        // Empty mask row: nothing can survive the mask. This guard also
        // prevents the out-of-bounds M.colids[maskIdx] read below.
        if (maskSize == 0) {
            rowNvals[i] = 0;
            return;
        }
        // Cursors point at the last written element; step onto the first free
        // slot (the compaction below restores the invariant).
        prevColIdC++;
        prevValueC++;
        std::fill(flags, flags + maskSize, false);
        // Iterate though nonzeros in the A's current row
        for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; j++) {
            const IT inner = A.colids[j];
            IT loc = B.rowptr[inner];
            // Skip empty B rows BEFORE touching colids (the original read
            // colids before this check, and read A.colids instead of B.colids).
            if (loc == B.rowptr[inner + 1]) { continue; }
            IT key = B.colids[loc];
            IT maskIdx = maskBegin;
            // Find the intersection between the mask's row and B's row
            while (true) {
                if (key < M.colids[maskIdx]) {
                    if (++loc < B.rowptr[inner + 1]) { key = B.colids[loc]; } else { break; }
                } else if (key > M.colids[maskIdx]) {
                    if (++maskIdx == maskEnd) { break; }
                } else {
                    // colid is found in both arrays
                    const auto idx = maskIdx - maskBegin;
                    const NT value = multop(A.values[j], B.values[loc]);
                    if (!flags[idx]) {
                        prevValueC[idx] = value;
                        flags[idx] = true;
                    } else {
                        prevValueC[idx] = addop(prevValueC[idx], value);
                    }
                    if (++loc < B.rowptr[inner + 1]) { key = B.colids[loc]; } else { break; }
                    if (++maskIdx == maskEnd) { break; }
                }
            }
        }
        /* Compact the dense accumulator into the destination arrays and set row IDs */
        IT dst = 0;
        for (IT src = 0; src < maskSize; src++) {
            if (flags[src]) {
                prevColIdC[dst] = M.colids[maskBegin + src];
                prevValueC[dst] = prevValueC[src];
                dst++;
            }
        }
        // Leave the cursors pointing at the last written element again.
        prevColIdC += dst - 1;
        prevValueC += dst - 1;
        rowNvals[i] = dst;
        threadNvals += dst;
    }
};
// Masked column-accumulator row kernel, variant 2: same accumulator scheme as
// MCA_v1, but the intersection is driven by the mask index with an inner
// catch-up loop over B's columns.
struct MCA_v2 : MCABase {
    const static bool masked = true;
    // Computes row i of C = (A*B) .* M; see MCA_v1::row for the parameter
    // conventions (output cursors point at the last written element).
    template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
    [[gnu::always_inline]]
    static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, IT i,
                    IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, bool *flags,
                    IT &threadNvals) {
        IT maskBegin = M.rowptr[i];
        const IT maskEnd = M.rowptr[i + 1];
        const IT maskSize = maskEnd - maskBegin;
        // Since prev***C point to the previous element, increment them
        prevColIdC++;
        prevValueC++;
        std::fill(flags, flags + maskSize, false);
        // Iterate though nonzeros in the A's current row
        for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; j++) {
            const IT inner = A.colids[j];
            IT loc = B.rowptr[inner];
            // Skip empty B rows BEFORE reading colids (the original read
            // colids before this check, and read A.colids instead of B.colids).
            if (loc == B.rowptr[inner + 1]) { continue; }
            IT key = B.colids[loc];
            // Find the intersection between the mask's row and B's row
            for (IT maskIdx = maskBegin; maskIdx < maskEnd; maskIdx++) {
                while (key < M.colids[maskIdx]) {
                    if (++loc < B.rowptr[inner + 1]) { key = B.colids[loc]; } else { goto outerLoopBreak; }
                }
                if (key == M.colids[maskIdx]) {
                    // colid is found in both arrays
                    const auto idx = maskIdx - maskBegin;
                    const NT value = multop(A.values[j], B.values[loc]);
                    if (!flags[idx]) {
                        prevValueC[idx] = value;
                        flags[idx] = true;
                    } else {
                        prevValueC[idx] = addop(prevValueC[idx], value);
                    }
                    if (++loc < B.rowptr[inner + 1]) { key = B.colids[loc]; } else { break; }
                }
            }
            outerLoopBreak:
            continue;
        }
        /* Compact the dense accumulator into the destination arrays and set row IDs */
        IT dst = 0;
        for (IT src = 0; src < maskSize; src++) {
            if (flags[src]) {
                prevColIdC[dst] = M.colids[maskBegin + src];
                prevValueC[dst] = prevValueC[src];
                dst++;
            }
        }
        // Leave the cursors pointing at the last written element again.
        prevColIdC += dst - 1;
        prevValueC += dst - 1;
        rowNvals[i] = dst;
        threadNvals += dst;
    }
};
// Masked column-accumulator row kernel, pointer-based variant: intersects each
// selected B row with the mask row using iterators over the colids arrays.
struct MCA_v3 : MCABase {
    const static bool masked = true;
    // Computes row i of C = (A*B) .* M; output cursors point at the last
    // written element, flags is scratch with one bool per mask entry.
    template<typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
    [[gnu::always_inline]]
    static void row(const CSR<IT, NT> &A, const CSR<IT, NT> &B, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, IT i,
                    IT *rowNvals, IT *&prevColIdC, NT *&prevValueC, bool *flags,
                    IT &threadNvals) {
        const auto maskFirst = &M.colids[M.rowptr[i]];
        const auto maskLast = &M.colids[M.rowptr[i + 1]];
        const auto maskLen = maskLast - maskFirst;
        // Cursors point at the last written element; step onto the first free slot.
        ++prevColIdC;
        ++prevValueC;
        std::fill(flags, flags + maskLen, false);
        // Walk the nonzeros of A's row i.
        for (IT j = A.rowptr[i]; j < A.rowptr[i + 1]; ++j) {
            const IT inner = A.colids[j];
            const auto colFirst = &B.colids[B.rowptr[inner]];
            const auto colLast = &B.colids[B.rowptr[inner + 1]];
            auto colIt = colFirst;
            auto maskIt = maskFirst;
            // Two-pointer intersection of B(inner,:) with the mask row.
            while (colIt != colLast && maskIt != maskLast) {
                if (*colIt < *maskIt) {
                    ++colIt;
                } else if (*maskIt < *colIt) {
                    ++maskIt;
                } else {
                    // Column present in both: accumulate into the mask-indexed slot.
                    const auto slot = maskIt - maskFirst;
                    const NT prod = multop(A.values[j], B.values[B.rowptr[inner] + (colIt - colFirst)]);
                    if (flags[slot]) {
                        prevValueC[slot] = addop(prevValueC[slot], prod);
                    } else {
                        prevValueC[slot] = prod;
                        flags[slot] = true;
                    }
                    ++colIt;
                    ++maskIt;
                }
            }
        }
        /* Compact surviving accumulator slots into the destination arrays */
        IT dst = 0;
        for (decltype(maskLen) src = 0; src < maskLen; ++src) {
            if (flags[src]) {
                prevColIdC[dst] = maskFirst[src];
                prevValueC[dst] = prevValueC[src];
                ++dst;
            }
        }
        // Restore the last-written-element invariant.
        prevColIdC += dst - 1;
        prevValueC += dst - 1;
        rowNvals[i] = dst;
        threadNvals += dst;
    }
};
}
// Parallel SpGEMM driver: C = A*B (optionally masked by M) over the
// (multop, addop) semiring. Work is split across threads by a flop-based
// prefix sum; each thread computes its rows into thread-local buffers, then
// all threads cooperatively assemble C.
// numThreads == 0 means "use the OpenMP default".
template<bool masked, class RowAlgorithm, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HeapSpGEMMImpl(const CSR<IT, NT> &A, const CSR<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M,
                    MultiplyOperation multop, AddOperation addop, unsigned numThreads) {
    // Both asserts together enforce masked == RowAlgorithm::masked, with a
    // distinct message for each direction of mismatch.
    static_assert(masked == RowAlgorithm::masked || masked, "Row algorithm does not support mask.");
    static_assert(masked == RowAlgorithm::masked || !masked, "Row algorithm is used for masked computation.");
    if (numThreads == 0) {
#pragma omp parallel
#pragma omp single
        numThreads = omp_get_num_threads();
    }
    if (!C.isEmpty()) { C.make_empty(); }
    C.rows = A.rows;
    C.cols = B.cols;
    // Load-balancing Thread Scheduling
    IT *maxnnzc = my_malloc<IT>(A.rows);
    long long int flops = tmp::getFlop(A, B, maxnnzc) / 2;
    IT flopsPerThread = flops / numThreads; // amount of work that will be assigned to each thread
    IT *rowStart = my_malloc<IT>(A.rows); //start index in the global array for storing ith column of C
    IT *rowNvals = my_malloc<IT>(A.rows); // number of nonzeros in each each column in C
    rowStart[0] = 0;
    // Global space used to store result
    IT *threadsNvals = my_malloc<IT>(numThreads);
    // Parallelized version: rowStart becomes the exclusive prefix sum of maxnnzc.
    scan(maxnnzc, rowStart, A.rows);
    // ************************ Numeric Phase *************************************
#pragma omp parallel num_threads(numThreads)
    {
        int thisThread = omp_get_thread_num();
        // Each thread takes a contiguous row range whose flop count is
        // approximately flopsPerThread (binary search on the prefix sum).
        // @formatter:off
        IT rowBegin = thisThread != 0 ? (lower_bound(rowStart, rowStart + A.rows, flopsPerThread * thisThread)) - rowStart : 0;
        IT rowEnd = thisThread != numThreads - 1 ? (lower_bound(rowStart, rowStart + A.rows, flopsPerThread * (thisThread + 1))) - rowStart : A.rows;
        // @formatter:on
        IT localsum = RowAlgorithm::estimateResultSize(rowBegin, rowEnd, maxnnzc, A, B, M);
        // We need +1 even though the first element of the array is never accessed.
        // However, the first element may be prefetched so we have to allocate it together with the rest of the array.
        IT *colIdsLocalMem = my_malloc<IT>(localsum + 1);
        NT *valuesLocalMem = my_malloc<NT>(localsum + 1);
        // Cursors point at the "previous" element; the row kernels pre-increment.
        IT *prevColIdC = colIdsLocalMem;
        NT *prevValueC = valuesLocalMem;
        auto auxMemory = RowAlgorithm::allocateAuxiliaryMemory(rowBegin, rowEnd, maxnnzc, A, B, M);
        IT threadNvals = 0;
        // Iterate through all rows in A
        for (IT i = rowBegin; i < rowEnd; ++i) {
            RowAlgorithm::row(A, B, M, multop, addop, i, rowNvals, prevColIdC, prevValueC, auxMemory, threadNvals);
        }
        threadsNvals[thisThread] = threadNvals;
        my_free(auxMemory);
        // All per-thread counts must be final before the master sizes C.
#pragma omp barrier
#pragma omp master
        {
            C.rowptr = my_malloc<IT>(C.rows + 1);
            C.rowptr[0] = 0;
            C.nnz = std::accumulate(threadsNvals, threadsNvals + numThreads, IT(0));;
            C.colids = my_malloc<IT>(C.nnz);
            C.values = my_malloc<NT>(C.nnz);
        }
        // Offset of this thread's first output element in the global arrays.
        IT rowPtrOffset = std::accumulate(threadsNvals, threadsNvals + thisThread, IT(0));
        // C's arrays must be allocated before any thread writes into them.
#pragma omp barrier
        // set rowptr in C for local rows
        for (IT i = rowBegin; i < rowEnd; ++i) {
            C.rowptr[i] = rowPtrOffset;
            rowPtrOffset += rowNvals[i];
        }
        if (thisThread == numThreads - 1) { C.rowptr[C.rows] = rowPtrOffset; }
        // copy local values to C (the +1 skips the unused sentinel slot)
        copy(colIdsLocalMem + 1, colIdsLocalMem + threadNvals + 1, C.colids + C.rowptr[rowBegin]);
        copy(valuesLocalMem + 1, valuesLocalMem + threadNvals + 1, C.values + C.rowptr[rowBegin]);
        my_free<IT>(colIdsLocalMem);
        my_free<NT>(valuesLocalMem);
    }
    my_free<IT>(maxnnzc);
    my_free<IT>(rowStart);
    my_free<IT>(rowNvals);
}
// Unmasked SpGEMM entry point: C = A * B over the (multop, addop) semiring.
// Forwards to HeapSpGEMMImpl with an empty placeholder mask (the mask is
// never used when the masked template flag is false).
template<class RowAlgorithm, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HeapSpGEMM(const CSR<IT, NT> &A, const CSR<IT, NT> &B, CSR<IT, NT> &C,
                MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) {
    HeapSpGEMMImpl<false, RowAlgorithm>(A, B, C, CSR<IT, NT>{}, multop, addop, numThreads);
}
// Masked SpGEMM entry point: C = (A * B) .* M over the (multop, addop)
// semiring. Forwards to HeapSpGEMMImpl with the masked flag set.
template<class RowAlgorithm, typename IT, typename NT, typename MultiplyOperation, typename AddOperation>
void HeapSpGEMM(const CSR<IT, NT> &A, const CSR<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M,
                MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) {
    HeapSpGEMMImpl<true, RowAlgorithm>(A, B, C, M, multop, addop, numThreads);
}
#endif //MASKED_SPGEMM_HEAP_MULT_GENERIC_H
|
mcmc.h | #include <cmath>
#include <iostream>
#include <iomanip>
#include <vector>
#include <limits>
#include <string>
#include <stdexcept>
//#include <omp.h>
// Differential-Evolution MCMC (DE-MCMC) sampler over prior distributions.
//   R                 : random number source
//   likelihood        : likelihood(theta, obs) -> log-likelihood, may fill obs
//   report            : report(iter, logP, chain, logL, theta, obs), called once
//                       per chain per iteration (iter < 0 during burn-in)
//   burn_in/iterations: number of burn-in and sampling iterations
//   n_chains          : number of coupled chains (must be >= 3)
//   priors            : one Distribution per parameter; sets the dimensionality
//   param_names       : optional labels for verbose output
//   reeval_likelihood : re-evaluate each chain's likelihood every iteration
//                       (for stochastic likelihoods)
//   in_parallel/n_threads : OpenMP controls (currently commented out)
//   classic_gamma     : use the classic DE step size 2.38/sqrt(2*d) with a
//                       full-step (gamma = 1) every 10th iteration
//   R_skip            : if > 0, track the Gelman-Rubin-Brooks R diagnostic and
//                       skip the rest of burn-in once all R_hat < 1.05
//   init/init_iter    : optional initial chain states and starting iteration
template <typename Observables, typename LikelihoodFunc, typename ReportFunc>
void DEMCMC_Priors(Randomizer& R, LikelihoodFunc likelihood, ReportFunc report,
    int burn_in, int iterations, int n_chains, std::vector<Distribution>& priors,
    bool verbose = true, std::vector<std::string> param_names = std::vector<std::string>(),
    bool reeval_likelihood = false, bool in_parallel = false, int n_threads = -1,
    bool classic_gamma = false, int R_skip = -1,
    std::vector<std::vector<double>> init = std::vector<std::vector<double>>(), int init_iter = 1)
{
    using namespace std;
    // #ifdef _OPENMP
    // if (in_parallel && n_threads > 0)
    //     omp_set_num_threads(n_threads);
    // #endif
    if (n_chains < 3)
        throw runtime_error("Cannot use DE-MCMC with fewer than 3 chains.");
    // Store acceptance rates over a rolling window of ar_size proposals
    unsigned int ar_size = 1000;
    unsigned int ar_i = 0;
    bool show_ar = false;
    vector<bool> acceptances(ar_size, false);
    // Storage for chains and settings
    int n_theta = priors.size();
    vector<vector<double>> chains(n_chains, vector<double>(n_theta, 0.0));
    vector<Observables> obs(n_chains, Observables{});
    vector<double> p(n_chains, 0.0); // log probability for each chain
    vector<double> l(n_chains, 0.0); // log likelihood for each chain
    double b = 0.001; // scale of the uniform jitter added to every proposal
    // Storage for calls to Randomizer - to make thread-safe
    vector<vector<double>> random_perturbations(n_chains, vector<double>(n_theta, 0.0));
    vector<double> random_tests(n_chains, 0.0);
    vector<double> random_gammas(n_chains, 0.0);
    vector<vector<int>> random_chains(n_chains, vector<int>(2, 0));
    // Storage for calculation of Gelman-Rubin-Brooks R diagnostic
    // (Welford-style running mean / M2 per chain and parameter)
    int running_count = 0;
    vector<vector<double>> running_mean(n_chains, vector<double>(n_theta, 0.0));
    vector<vector<double>> running_M2(n_chains, vector<double>(n_theta, 0.0));
    vector<vector<double>> running_s2(n_chains, vector<double>(n_theta, 0.0));
    vector<double> R_hat(n_theta, 0.0);
    const int n_between_R = 100;
    // Assemble target func: log posterior = log prior + log likelihood;
    // short-circuits to -inf (without evaluating the likelihood) if any
    // parameter falls outside its prior's support.
    auto target = [&](vector<double>& theta, Observables& obs, double& l)
    {
        double p = 0;
        for (int d = 0; d < n_theta; ++d)
        {
            double pd = priors[d].LogProbability(theta[d]);
            if (pd == -std::numeric_limits<double>::infinity())
            {
                l = -std::numeric_limits<double>::infinity();
                return -std::numeric_limits<double>::infinity();
            }
            p += pd;
        }
        l = likelihood(theta, obs);
        return l + p;
    };
    // Initialize chains . . .
    if (init.empty())
    {
        // . . . from prior
        for (int c = 0; c < n_chains; ++c)
            for (int d = 0; d < n_theta; ++d)
                chains[c][d] = priors[d].RandomInit(R);
    }
    else
    {
        // . . . from initial values supplied
        if ((int)init.size() != n_chains || (int)init[0].size() != n_theta)
            throw runtime_error("init vector supplied is not the right size.");
        chains = init;
    }
    // Set initial probabilities and observables
    if (verbose)
        cout << "Initializing chains...\n";
    for (int c = 0; c < n_chains; ++c)
    {
        p[c] = target(chains[c], obs[c], l[c]);
        if (verbose)
            cout << "." << flush;
    }
    // Do iterations
    if (verbose)
        cout << "\nIterating...\n" << flush;
    for (int i = init_iter; i < burn_in + iterations; ++i)
    {
        // If requested, re-evaluate likelihood for next iteration
        if (reeval_likelihood)
        {
            // #pragma omp parallel for if(in_parallel) schedule(dynamic)
            for (int c = 0; c < n_chains; ++c)
                p[c] = target(chains[c], obs[c], l[c]);
        }
        // Prepare storage and random variates.
        // Migration (swap-like moves via a random cyclic permutation) only
        // happens with probability 0.1 during the first 75% of burn-in.
        bool migration = i < burn_in * 0.75 ? R.Bernoulli(0.1) : false;
        vector<int> migration_indices(n_chains, 0);
        if (migration)
        {
            for (int c = 0; c < n_chains; ++c)
                migration_indices[c] = c;
            R.Shuffle(migration_indices);
        }
        // Pre-draw all random numbers so the proposal loop itself makes no
        // Randomizer calls (keeps it safe to parallelize).
        for (int c = 0; c < n_chains; ++c)
        {
            for (int d = 0; d < n_theta; ++d)
                random_perturbations[c][d] = R.Uniform(-b, b);
            random_tests[c] = R.Uniform();
            if (!migration)
            {
                if (classic_gamma)
                    random_gammas[c] = (i % 10 == 0 ? 1.0 : 2.38 / sqrt(2 * n_theta));
                else
                    random_gammas[c] = R.Uniform(0.5, 1.0);
                // Two distinct chains, both different from c, for the DE move
                do random_chains[c][0] = R.Discrete(n_chains); while (random_chains[c][0] == c);
                do random_chains[c][1] = R.Discrete(n_chains); while (random_chains[c][1] == c || random_chains[c][1] == random_chains[c][0]);
            }
        }
        auto saved_chains = chains;
        vector<int> accept(n_chains, 0);
        // #pragma omp parallel for if(in_parallel) schedule(dynamic)
        for (int c = 0; c < n_chains; ++c)
        {
            vector<double> theta_p = chains[c];
            int c_from = c;
            // Generate proposal, either by migration...
            if (migration)
            {
                c_from = migration_indices[c];
                theta_p = saved_chains[migration_indices[(c + 1) % n_chains]];
                for (int d = 0; d < n_theta; ++d)
                    theta_p[d] += random_perturbations[c][d];
            }
            else // ... or by directed mutation
            {
                for (int d = 0; d < n_theta; ++d)
                    theta_p[d] += random_gammas[c] * (saved_chains[random_chains[c][1]][d] - saved_chains[random_chains[c][0]][d]) + random_perturbations[c][d];
            }
            // Calculate log-probability and accept or reject
            // (the first clause accepts with probability 1/2 when both current
            // and proposed posteriors are -inf, so chains can escape)
            Observables obs_p{};
            double l_p = 0;
            double p_p = target(theta_p, obs_p, l_p);
            if ( (p_p == -std::numeric_limits<double>::infinity() && p[c_from] == -std::numeric_limits<double>::infinity() && random_tests[c] < 0.5)
                || (p_p > -std::numeric_limits<double>::infinity() && random_tests[c] < exp(p_p - p[c_from])) )
            {
                accept[c_from] = 1;
                chains[c_from] = theta_p;
                obs[c_from] = obs_p;
                p[c_from] = p_p;
                l[c_from] = l_p;
            }
        }
        // Update acceptances (show the rate only once the window has filled)
        for (int c = 0; c < n_chains; ++c)
        {
            if (ar_i == ar_size - 1) show_ar = true;
            acceptances[ar_i] = accept[c];
            ar_i = (ar_i + 1) % ar_size;
        }
        // Update Gelman-Rubin-Brooks R
        bool R_all_ok = true;
        if (R_skip > 0)
        {
            ++running_count;
            for (int c = 0; c < n_chains; ++c)
            {
                for (int d = 0; d < n_theta; ++d)
                {
                    // Welford update of per-chain running mean and M2
                    double delta = chains[c][d] - running_mean[c][d];
                    running_mean[c][d] += delta / running_count;
                    double delta2 = chains[c][d] - running_mean[c][d];
                    running_M2[c][d] += delta * delta2;
                }
            }
            // Calculate R every n_between_R generations
            if (i % n_between_R == 0)
            {
                // Finalise running mean and variance
                for (int c = 0; c < n_chains; ++c)
                    for (int d = 0; d < n_theta; ++d)
                        running_s2[c][d] = running_M2[c][d] / (running_count - 1);
                // Calculate statistic for each parameter
                // (W = within-chain variance, B = between-chain variance)
                for (int d = 0; d < n_theta; ++d)
                {
                    double M = n_chains;
                    double N = running_count;
                    double W = 0, X = 0, B = 0;
                    for (int c = 0; c < n_chains; ++c)
                    {
                        W += running_s2[c][d];
                        X += running_mean[c][d];
                    }
                    W /= M;
                    X /= M;
                    for (int c = 0; c < n_chains; ++c)
                        B += (running_mean[c][d] - X) * (running_mean[c][d] - X);
                    B *= N / (M - 1);
                    double var = ((N - 1) / N) * W + B / N;
                    R_hat[d] = std::sqrt(var / W);
                    if (R_hat[d] > 1.05)
                        R_all_ok = false;
                }
            }
        }
        // Report results of this iteration
        for (int c = 0; c < n_chains; ++c)
            report(i - burn_in, p[c], c, l[c], chains[c], obs[c]);
        // Print progress
        if (verbose)
        {
            cout << "." << flush;
            if (i % 100 == 0)
            {
                cout << "\n" << (i < burn_in ? "burn-in" : "main") << " iteration " << i - burn_in << ":";
                if (!param_names.empty())
                {
                    cout << "\n " << setw(12) << right << "log (P)" << setw(12) << right << "log (L)";
                    for (auto n : param_names)
                        cout << setw(12) << right << n;
                }
                for (int c = 0; c < n_chains; ++c)
                {
                    cout << "\nchain" << setw(4) << right << c << setw(12) << right << p[c] << setw(12) << right << l[c];
                    for (int d = 0; d < n_theta; ++d)
                        cout << setw(12) << right << chains[c][d];
                }
                if (R_skip > 0)
                {
                    cout << "\nGelman-Rubin-Brooks diagnostic R ";
                    for (int d = 0; d < n_theta; ++d)
                        cout << setw(12) << right << R_hat[d];
                }
                double acceptance_rate = show_ar ? (double)count(acceptances.begin(), acceptances.end(), true) / ar_size : -1;
                cout << "\nacceptance rate: " << acceptance_rate << "\n\n" << flush;
            }
        }
        // Skip past burn-in if R OK
        if (R_skip > 0 && i > R_skip && i < burn_in && R_all_ok)
        {
            if (verbose)
                cout << "\n\nSkipping to iterations (R < 1.05 reached).\n\n";
            i = burn_in - 1;
        }
    }
    if (verbose)
        cout << "\n";
}
// based on Runarsson & Yao 2004; https://notendur.hi.is/~tpr/papers/RuYa05.pdf
// One member of the evolution-strategy population: position x, per-dimension
// mutation step sizes s, and cached objective (log-posterior) value f.
struct Particle
{
    // Draw the starting position from the priors; each initial step size spans
    // the feasible interval scaled by 1/sqrt(dimension).
    Particle(Randomizer& rand, const std::vector<double>& lb, const std::vector<double>& ub, std::vector<Distribution>& priors)
    {
        x.reserve(lb.size());
        s.reserve(lb.size());
        for (unsigned int i = 0; i < lb.size(); ++i)
        {
            x.push_back(priors[i].RandomInit(rand));
            s.push_back((ub[i] - lb[i]) / sqrt(lb.size()));
        }
    }
    std::vector<double> x; // position (parameter vector)
    std::vector<double> s; // mutation step sizes
    // Was previously left uninitialized (indeterminate read risk if used
    // before the first fitness evaluation); initialize to "worst" fitness.
    double f = -std::numeric_limits<double>::infinity();
};
// Evolution-strategy maximization of the log posterior (log prior + log
// likelihood) over the priors' support, following Runarsson & Yao's (mu, np)
// scheme. Stops after maxeval iterations or when the best fitness changes by
// less than ftol_abs between iterations; the final best point is passed to
// report(t, logP, 0, logL, x, obs).
template <typename Observables, typename LikelihoodFunc, typename ReportFunc>
void Optimize_Priors(Randomizer& R, LikelihoodFunc likelihood, ReportFunc report, std::vector<Distribution>& priors,
    unsigned int maxeval, double ftol_abs, bool verbose)
{
    using namespace std;
    unsigned int d = priors.size();
    unsigned int np = 10 * (d + 1); // number of particles
    unsigned int mu = np / 4; // number of fittest particles to reproduce
    double alpha = 0.2; // exponential smoothing
    double gamma = 0.85; // differential variation step size
    double taup = 1.0 / sqrt(2. * d); // step size evolution parameter
    double tau = 1.0 / sqrt(2. * sqrt(d)); // step size evolution parameter
    // Determine bounds
    vector<double> lb, ub;
    for (unsigned int i = 0; i < priors.size(); ++i)
    {
        lb.push_back(priors[i].LowerBound());
        ub.push_back(priors[i].UpperBound());
    }
    // Initialize particles
    vector<Particle> y;
    for (unsigned int i = 0; i < np; ++i)
        y.push_back(Particle(R, lb, ub, priors));
    vector<Particle> yn = y;
    // Assemble target func: log posterior, short-circuiting to -inf when any
    // parameter is outside its prior's support.
    auto target = [&](vector<double>& theta, Observables& obs, double& l)
    {
        double p = 0;
        for (unsigned int i = 0; i < d; ++i)
        {
            double pd = priors[i].LogProbability(theta[i]);
            if (pd == -std::numeric_limits<double>::infinity())
            {
                l = -std::numeric_limits<double>::infinity();
                return -std::numeric_limits<double>::infinity();
            }
            p += pd;
        }
        l = likelihood(theta, obs);
        return l + p;
    };
    Observables obs{};
    double l = 0;
    double last_f = -std::numeric_limits<double>::infinity();
    // Iterate
    unsigned int t = 0;
    for (;; ++t)
    {
        // Evaluate objective function
        for (auto& i : y)
            i.f = target(i.x, obs, l);
        // Rank particles by fitness (descending: y[0] is the fittest)
        sort(y.begin(), y.end(), [](const Particle& a, const Particle& b) { return a.f > b.f; });
        // Report on progress
        if (verbose && t % 10 == 0)
        {
            cout << "Iteration " << t << ": ";
            for (auto& b : y[0].x)
                cout << b << " ";
            cout << "with lp = " << y[0].f << "\n";
        }
        // Quit if end condition reached
        double this_f = y[0].f;
        if (abs(this_f - last_f) < ftol_abs || t == maxeval - 1)
            break;
        last_f = this_f;
        // Update particles: each offspring k descends from parent i = k % mu
        for (unsigned int k = 0; k < np; ++k)
        {
            unsigned int i = k % mu;
            double norm = R.Normal();
            if (k < mu) // differential variation (toward the current best)
            {
                yn[k] = y[i];
                for (unsigned int j = 0; j < d; ++j)
                    yn[k].x[j] += gamma * (y[0].x[j] - y[i + 1].x[j]);
            }
            else // standard mutation (log-normal step-size self-adaptation)
            {
                yn[k] = y[i];
                for (unsigned int j = 0; j < d; ++j)
                {
                    yn[k].s[j] *= exp(taup * norm + tau * R.Normal());
                    // NOTE(review): mutates around yn[i].x (possibly already
                    // updated this generation by the k < mu branch), not the
                    // sorted parent y[i].x — confirm this is intentional.
                    do {
                        yn[k].x[j] = yn[i].x[j] + yn[k].s[j] * R.Normal();
                    } while (yn[k].x[j] < lb[j] || yn[k].x[j] > ub[j]);
                    yn[k].s[j] = y[i].s[j] + alpha * (yn[k].s[j] - y[i].s[j]);
                    yn[k].s[j] = min(yn[k].s[j], (ub[j] - lb[j]) / sqrt(d));
                }
            }
            // clamp to range
            for (unsigned int j = 0; j < d; ++j)
                yn[k].x[j] = max(lb[j], min(ub[j], yn[k].x[j]));
        }
        swap(y, yn);
    }
    // Report back: re-evaluate the best particle to refresh obs and l
    target(y[0].x, obs, l);
    report(t, y[0].f, 0, l, y[0].x, obs);
}
|
libsais.c | /*--
This file is a part of libsais, a library for linear time
suffix array and burrows wheeler transform construction.
Copyright (c) 2021 Ilya Grebnov <ilya.grebnov@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Please see the file LICENSE for full copyright information.
--*/
/*--
Changes made to the original file:
- July 14, 2021 Switched to internal bsc malloc / free functions.
--*/
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "libsais_internal.h"
#include "libsais.h"
#include "../../platform/platform.h"
#undef INLINE
#undef RESTRICT
#undef ALPHABET_SIZE
#if defined(_OPENMP)
#include <omp.h>
#else
#define UNUSED(_x) (void)(_x)
#endif
typedef int32_t sa_sint_t;
typedef uint32_t sa_uint_t;
typedef ptrdiff_t fast_sint_t;
typedef size_t fast_uint_t;
#define SAINT_BIT (32)
#define SAINT_MAX INT32_MAX
#define SAINT_MIN INT32_MIN
#define ALPHABET_SIZE (1 << CHAR_BIT)
#define UNBWT_FASTBITS (17)
#define SUFFIX_GROUP_BIT (SAINT_BIT - 1)
#define SUFFIX_GROUP_MARKER (((sa_sint_t)1) << (SUFFIX_GROUP_BIT - 1))
#define BUCKETS_INDEX2(_c, _s) (((_c) << 1) + (_s))
#define BUCKETS_INDEX4(_c, _s) (((_c) << 2) + (_s))
#define LIBSAIS_PER_THREAD_CACHE_SIZE (24576)
/* One buffered (symbol, index) pair used by a worker thread -- presumably
   staged during parallel bucket/radix passes; verify against the users of
   the cache field below. */
typedef struct LIBSAIS_THREAD_CACHE
{
    sa_sint_t symbol;
    sa_sint_t index;
} LIBSAIS_THREAD_CACHE;
/* Per-thread working state. The union pads each element to 64 bytes (one
   cache line on typical CPUs) so adjacent threads' state does not share a
   line (false sharing). */
typedef union LIBSAIS_THREAD_STATE
{
    struct
    {
        fast_sint_t position;          /* offset of this thread's slice -- TODO confirm */
        fast_sint_t count;
        fast_sint_t m;
        fast_sint_t last_lms_suffix;
        sa_sint_t * buckets;           /* thread-local bucket counters */
        LIBSAIS_THREAD_CACHE * cache;  /* thread-local (symbol, index) buffer */
    } state;
    uint8_t padding[64];
} LIBSAIS_THREAD_STATE;
/* Reusable context for suffix-array construction: shared bucket counters
   plus one LIBSAIS_THREAD_STATE per worker thread. */
typedef struct LIBSAIS_CONTEXT
{
    sa_sint_t * buckets;
    LIBSAIS_THREAD_STATE * thread_state;
    fast_sint_t threads;               /* number of worker threads */
} LIBSAIS_CONTEXT;
/* Reusable context for inverse Burrows-Wheeler transform. */
typedef struct LIBSAIS_UNBWT_CONTEXT
{
    sa_uint_t * bucket2;
    uint16_t * fastbits;               /* lookup table sized by UNBWT_FASTBITS */
    sa_uint_t * buckets;
    fast_sint_t threads;               /* number of worker threads */
} LIBSAIS_UNBWT_CONTEXT;
#if defined(__GNUC__) || defined(__clang__)
#define RESTRICT __restrict__
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define RESTRICT __restrict
#else
#error Your compiler, configuration or platform is not supported.
#endif
#if defined(__has_builtin)
#if __has_builtin(__builtin_prefetch)
#define HAS_BUILTIN_PREFECTCH
#endif
#elif defined(__GNUC__) && __GNUC__ > 3
#define HAS_BUILTIN_PREFECTCH
#endif
#if defined(HAS_BUILTIN_PREFECTCH)
#define libsais_prefetch(address) __builtin_prefetch((const void *)(address), 0, 0)
#define libsais_prefetchw(address) __builtin_prefetch((const void *)(address), 1, 0)
#elif defined (_M_IX86) || defined (_M_AMD64)
#include <intrin.h>
#define libsais_prefetch(address) _mm_prefetch((const void *)(address), _MM_HINT_NTA)
#define libsais_prefetchw(address) _m_prefetchw((const void *)(address))
#elif defined (_M_ARM)
#include <intrin.h>
#define libsais_prefetch(address) __prefetch((const void *)(address))
#define libsais_prefetchw(address) __prefetchw((const void *)(address))
#elif defined (_M_ARM64)
#include <intrin.h>
#define libsais_prefetch(address) __prefetch2((const void *)(address), 1)
#define libsais_prefetchw(address) __prefetch2((const void *)(address), 17)
#else
#error Your compiler, configuration or platform is not supported.
#endif
#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
#if defined(_LITTLE_ENDIAN) \
|| (defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && BYTE_ORDER == LITTLE_ENDIAN) \
|| (defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && _BYTE_ORDER == _LITTLE_ENDIAN) \
|| (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN) \
|| (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define __LITTLE_ENDIAN__
#elif defined(_BIG_ENDIAN) \
|| (defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN) \
|| (defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && _BYTE_ORDER == _BIG_ENDIAN) \
|| (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && __BYTE_ORDER == __BIG_ENDIAN) \
|| (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#define __BIG_ENDIAN__
#elif defined(_WIN32)
#define __LITTLE_ENDIAN__
#endif
#endif
#if defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
#if defined(__GNUC__) || defined(__clang__)
#define libsais_bswap16(x) (__builtin_bswap16(x))
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define libsais_bswap16(x) (_byteswap_ushort(x))
#else
#define libsais_bswap16(x) ((uint16_t)(x >> 8) | (uint16_t)(x << 8))
#endif
#elif !defined(__LITTLE_ENDIAN__) && defined(__BIG_ENDIAN__)
#define libsais_bswap16(x) (x)
#else
#error Your compiler, configuration or platform is not supported.
#endif
/* Rounds "address" up to the next multiple of "alignment".
   "alignment" must be a power of two (callers pass 64 or 4096). */
static void * libsais_align_up(const void * address, size_t alignment)
{
    ptrdiff_t mask = (ptrdiff_t)alignment - 1;
    return (void *)(((ptrdiff_t)address + mask) & ~mask);
}
/* Allocates "size" bytes aligned to "alignment". Over-allocates, aligns the
   pointer past a small header, and stashes the offset back to the raw block in
   the short immediately preceding the aligned address (read by
   libsais_free_aligned). Returns NULL on allocation failure. */
static void * libsais_alloc_aligned(size_t size, size_t alignment)
{
    void * base = bsc_malloc(size + sizeof(short) + alignment - 1);
    if (base == NULL)
    {
        return NULL;
    }

    void * aligned = libsais_align_up((void *)((ptrdiff_t)base + (ptrdiff_t)(sizeof(short))), alignment);
    ((short *)aligned)[-1] = (short)((ptrdiff_t)aligned - (ptrdiff_t)base);
    return aligned;
}
/* Releases a block obtained from libsais_alloc_aligned. Recovers the raw
   pointer from the offset stored just before the aligned address. NULL is a
   no-op. */
static void libsais_free_aligned(void * aligned_address)
{
    if (aligned_address == NULL)
    {
        return;
    }

    short offset = ((short *)aligned_address)[-1];
    bsc_free((void *)((ptrdiff_t)aligned_address - offset));
}
/* Allocates the per-thread state array plus two shared slabs: one of bucket
   counters (4 * ALPHABET_SIZE per thread) and one of deferred-write caches
   (LIBSAIS_PER_THREAD_CACHE_SIZE entries per thread), then wires each
   thread's state to its slice of the slabs.
   On any allocation failure everything is freed and NULL is returned. */
static LIBSAIS_THREAD_STATE * libsais_alloc_thread_state(sa_sint_t threads)
{
    LIBSAIS_THREAD_STATE * RESTRICT thread_state = (LIBSAIS_THREAD_STATE *)libsais_alloc_aligned((size_t)threads * sizeof(LIBSAIS_THREAD_STATE), 4096);
    sa_sint_t * RESTRICT thread_buckets = (sa_sint_t *)libsais_alloc_aligned((size_t)threads * 4 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
    LIBSAIS_THREAD_CACHE * RESTRICT thread_cache = (LIBSAIS_THREAD_CACHE *)libsais_alloc_aligned((size_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE * sizeof(LIBSAIS_THREAD_CACHE), 4096);

    if (thread_state != NULL && thread_buckets != NULL && thread_cache != NULL)
    {
        fast_sint_t t;
        for (t = 0; t < threads; ++t)
        {
            /* Hand each thread its own slice of the shared slabs. */
            thread_state[t].state.buckets = thread_buckets; thread_buckets += 4 * ALPHABET_SIZE;
            thread_state[t].state.cache = thread_cache; thread_cache += LIBSAIS_PER_THREAD_CACHE_SIZE;
        }

        return thread_state;
    }

    /* Partial failure: release whatever was allocated (free of NULL is a no-op). */
    libsais_free_aligned(thread_cache);
    libsais_free_aligned(thread_buckets);
    libsais_free_aligned(thread_state);
    return NULL;
}
/* Releases a thread-state array created by libsais_alloc_thread_state.
   The cache and bucket slabs are shared; thread 0 holds their base pointers.
   NULL is a no-op. */
static void libsais_free_thread_state(LIBSAIS_THREAD_STATE * thread_state)
{
    if (thread_state == NULL)
    {
        return;
    }

    libsais_free_aligned(thread_state[0].state.cache);
    libsais_free_aligned(thread_state[0].state.buckets);
    libsais_free_aligned(thread_state);
}
/* Creates a suffix-array construction context for "threads" threads.
   Per-thread state is only allocated when threads > 1. Returns NULL (with all
   partial allocations released) if any allocation fails. */
static LIBSAIS_CONTEXT * libsais_create_ctx_main(sa_sint_t threads)
{
    LIBSAIS_CONTEXT * RESTRICT ctx = (LIBSAIS_CONTEXT *)libsais_alloc_aligned(sizeof(LIBSAIS_CONTEXT), 64);
    sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
    LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL;

    if (ctx == NULL || buckets == NULL || (thread_state == NULL && threads != 1))
    {
        libsais_free_thread_state(thread_state);
        libsais_free_aligned(buckets);
        libsais_free_aligned(ctx);
        return NULL;
    }

    ctx->buckets = buckets;
    ctx->threads = threads;
    ctx->thread_state = thread_state;
    return ctx;
}
/* Destroys a context created by libsais_create_ctx_main. NULL is a no-op. */
static void libsais_free_ctx_main(LIBSAIS_CONTEXT * ctx)
{
    if (ctx == NULL)
    {
        return;
    }

    libsais_free_thread_state(ctx->thread_state);
    libsais_free_aligned(ctx->buckets);
    libsais_free_aligned(ctx);
}
#if defined(_OPENMP)
/* Counts entries of SA[omp_block_start .. omp_block_start + omp_block_size)
   that are negative (i.e. carry a "marked" flag in the sign bit). */
static sa_sint_t libsais_count_negative_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t block_end = omp_block_start + omp_block_size;
    sa_sint_t total = 0;
    fast_sint_t p;

    for (p = omp_block_start; p < block_end; ++p)
    {
        total += (SA[p] < 0);
    }

    return total;
}
/* Counts entries of SA[omp_block_start .. omp_block_start + omp_block_size)
   that equal zero. */
static sa_sint_t libsais_count_zero_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t block_end = omp_block_start + omp_block_size;
    sa_sint_t total = 0;
    fast_sint_t p;

    for (p = omp_block_start; p < block_end; ++p)
    {
        total += (SA[p] == 0);
    }

    return total;
}
/* Flushes a batch of deferred writes: for every cache entry e in the block,
   stores cache[e].index into SA[cache[e].symbol]. The loop is 4x unrolled and
   prefetches both the sequential cache stream and the scattered SA targets. */
static void libsais_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetch(&cache[i + 2 * prefetch_distance]);

        libsais_prefetchw(&SA[cache[i + prefetch_distance + 0].symbol]);
        libsais_prefetchw(&SA[cache[i + prefetch_distance + 1].symbol]);
        libsais_prefetchw(&SA[cache[i + prefetch_distance + 2].symbol]);
        libsais_prefetchw(&SA[cache[i + prefetch_distance + 3].symbol]);

        SA[cache[i + 0].symbol] = cache[i + 0].index;
        SA[cache[i + 1].symbol] = cache[i + 1].index;
        SA[cache[i + 2].symbol] = cache[i + 2].index;
        SA[cache[i + 3].symbol] = cache[i + 3].index;
    }

    /* Remainder: restore the true block end and finish one entry at a time. */
    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        SA[cache[i].symbol] = cache[i].index;
    }
}
/* Compacts the cache in place — the write cursor l only advances past an
   entry whose symbol is >= 0, so negative-symbol entries are dropped — then
   flushes the surviving entries to SA via libsais_place_cached_suffixes. */
static void libsais_compact_and_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j, l;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4)
    {
        libsais_prefetchw(&cache[i + prefetch_distance]);

        cache[l] = cache[i + 0]; l += cache[l].symbol >= 0;
        cache[l] = cache[i + 1]; l += cache[l].symbol >= 0;
        cache[l] = cache[i + 2]; l += cache[l].symbol >= 0;
        cache[l] = cache[i + 3]; l += cache[l].symbol >= 0;
    }

    /* Remainder of the compaction. */
    for (j += 3; i < j; i += 1)
    {
        cache[l] = cache[i]; l += cache[l].symbol >= 0;
    }

    /* Only the compacted prefix [omp_block_start, l) is placed. */
    libsais_place_cached_suffixes(SA, cache, omp_block_start, l - omp_block_start);
}
/* Element-wise adds the count array located bucket_stride elements below
   bucket00 into bucket00. */
static void libsais_accumulate_counts_s32_2(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;

    fast_sint_t s;
    for (s = 0; s < bucket_size; s += 1)
    {
        bucket00[s] += bucket01[s];
    }
}
/* Element-wise adds the 2 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_3(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s]; }
}
/* Element-wise adds the 3 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_4(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s]; }
}
/* Element-wise adds the 4 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_5(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride;
    sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s]; }
}
/* Element-wise adds the 5 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_6(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride;
    sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride;
    sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s]; }
}
/* Element-wise adds the 6 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_7(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride;
    sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride;
    sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride;
    sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s]; }
}
/* Element-wise adds the 7 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_8(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride;
    sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride;
    sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride;
    sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride;
    sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s]; }
}
/* Element-wise adds the 8 count arrays stacked below bucket00 (each
   bucket_stride apart) into bucket00. */
static void libsais_accumulate_counts_s32_9(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride)
{
    sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride;
    sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride;
    sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride;
    sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride;
    sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride;
    sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride;
    sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride;
    sa_sint_t * RESTRICT bucket08 = bucket07 - bucket_stride;
    fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s] + bucket08[s]; }
}
/* Accumulates num_buckets per-thread count arrays — stacked bucket_stride
   apart, with "buckets" pointing at the topmost — into "buckets".
   The while loop folds the lowest 9 arrays into one (their topmost), reducing
   the live array count by 8 per iteration; the switch finishes the remaining
   1..8 arrays by folding them into "buckets" itself. */
static void libsais_accumulate_counts_s32(sa_sint_t * RESTRICT buckets, fast_sint_t bucket_size, fast_sint_t bucket_stride, fast_sint_t num_buckets)
{
    while (num_buckets >= 9)
    {
        libsais_accumulate_counts_s32_9(buckets - (num_buckets - 9) * bucket_stride, bucket_size, bucket_stride); num_buckets -= 8;
    }

    switch (num_buckets)
    {
        case 1: break; /* a single array needs no accumulation */
        case 2: libsais_accumulate_counts_s32_2(buckets, bucket_size, bucket_stride); break;
        case 3: libsais_accumulate_counts_s32_3(buckets, bucket_size, bucket_stride); break;
        case 4: libsais_accumulate_counts_s32_4(buckets, bucket_size, bucket_stride); break;
        case 5: libsais_accumulate_counts_s32_5(buckets, bucket_size, bucket_stride); break;
        case 6: libsais_accumulate_counts_s32_6(buckets, bucket_size, bucket_stride); break;
        case 7: libsais_accumulate_counts_s32_7(buckets, bucket_size, bucket_stride); break;
        case 8: libsais_accumulate_counts_s32_8(buckets, bucket_size, bucket_stride); break;
    }
}
#endif
/* Scans T[omp_block_start .. omp_block_start + omp_block_size) right-to-left
   and writes the positions of its LMS suffixes into SA, filled downwards
   starting at index m.
   s is a sliding bit history of suffix types: the comparison yields 1 when the
   suffix at the current position sorts after its successor (L-type); equal
   symbols inherit the previous type via the (s & 1) adjustment. The pattern
   (s & 3) == 1 (an L directly preceding an S) marks the previously scanned
   position as LMS. Each candidate position is stored unconditionally and m
   only moves when the candidate really is LMS, so non-LMS stores are simply
   overwritten by the next iteration. */
static void libsais_gather_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, fast_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 128;

        /* Look past the block end over a run of equal symbols to classify the
           block's last position (c1 stays -1 at the end of T). */
        fast_sint_t i, j = omp_block_start + omp_block_size, c0 = T[omp_block_start + omp_block_size - 1], c1 = -1;
        while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        /* Main scan, 4x unrolled. */
        for (i = omp_block_start + omp_block_size - 2, j = omp_block_start + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - prefetch_distance]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
        }

        /* Remainder. */
        for (j -= 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
        }

        SA[m] = (sa_sint_t)(i + 1);
    }
}
/* Parallel driver for libsais_gather_lms_suffixes_8u: splits T into 16-aligned
   per-thread chunks. Each thread computes its SA destination from the suffix
   sum of the later threads' LMS counts (thread_state[t].state.m), gathers its
   chunk, and finally re-stores its last LMS suffix, which the neighbouring
   thread's unconditional candidate stores may have overwritten. */
static void libsais_gather_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        /* Chunk boundaries are rounded to multiples of 16; the last thread
           takes the remainder. */
        fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            libsais_gather_lms_suffixes_8u(T, SA, n, (fast_sint_t)n - 1, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            /* Offset this thread's output past the LMS suffixes gathered by
               all later (right-hand) threads. */
            fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t > omp_thread_num; --t) { m += thread_state[t].state.m; }

            libsais_gather_lms_suffixes_8u(T, SA, n, (fast_sint_t)n - 1 - m, omp_block_start, omp_block_size);

            #pragma omp barrier

            /* Patch this thread's last LMS suffix back in after all threads finished. */
            if (thread_state[omp_thread_num].state.m > 0)
            {
                SA[(fast_sint_t)n - 1 - m] = (sa_sint_t)thread_state[omp_thread_num].state.last_lms_suffix;
            }
        }
#endif
    }
}
/* 32-bit-symbol variant of the LMS gather: scans T[0..n) right-to-left,
   writing LMS suffix positions into SA filled downwards from n - 1. The
   type-history state machine in s is identical to the 8u variant:
   (s & 3) == 1 detects an LMS position. Returns the number of LMS suffixes
   found (they end up right-aligned at the top of SA). */
static sa_sint_t libsais_gather_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t i = n - 2;
    sa_sint_t m = n - 1;

    /* The last suffix T[n-1..] is treated as L-type (s starts at 1). */
    fast_uint_t s = 1;
    fast_sint_t c0 = T[n - 1];
    fast_sint_t c1 = 0;

    for (; i >= 3; i -= 4)
    {
        libsais_prefetch(&T[i - prefetch_distance]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1);
        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((s & 3) == 1);
        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((s & 3) == 1);
        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((s & 3) == 1);
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1);
    }

    return n - 1 - m;
}
/* Compacted variant of libsais_gather_lms_suffixes_32s for text whose symbols
   may carry a negative "mark" in the sign bit. The acceptance test becomes
   (s & 3) == (symbol >= 0): for unmarked (non-negative) symbols this reduces
   to the usual LMS test (s & 3) == 1, while suffixes following a marked
   (negative) symbol are accepted only when (s & 3) == 0, so ordinary LMS
   positions after marked symbols are skipped. Returns the number of suffixes
   gathered (right-aligned at the top of SA). */
static sa_sint_t libsais_gather_compacted_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t i = n - 2;
    sa_sint_t m = n - 1;

    fast_uint_t s = 1;
    fast_sint_t c0 = T[n - 1];
    fast_sint_t c1 = 0;

    for (; i >= 3; i -= 4)
    {
        libsais_prefetch(&T[i - prefetch_distance]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
    }

    return n - 1 - m;
}
#if defined(_OPENMP)
/* Counts suffixes of T[0..n) per symbol into 4 slots each (buckets has
   4 * k entries): slot (s & 3) encodes the 2-bit suffix-type history of the
   counted position, using the same right-to-left type state machine as the
   gather routines. Clears buckets first. */
static void libsais_count_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t));

    sa_sint_t i = n - 2;
    fast_uint_t s = 1;
    fast_sint_t c0 = T[n - 1];
    fast_sint_t c1 = 0;

    for (; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&T[i - 2 * prefetch_distance]);

        /* Warm the bucket slots for positions coming prefetch_distance ahead. */
        libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;

        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;

        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;

        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
    }

    /* Account for position 0 itself. */
    buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]++;
}
#endif
/* Counts suffixes of T[0..n) per symbol into 2 slots each (buckets has
   2 * k entries): slot 1 counts LMS positions ((s & 3) == 1), slot 0 counts
   all others. Same right-to-left type state machine as the gather routines.
   Clears buckets first. */
static void libsais_count_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    sa_sint_t i = n - 2;
    fast_uint_t s = 1;
    fast_sint_t c0 = T[n - 1];
    fast_sint_t c1 = 0;

    for (; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&T[i - 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;

        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    /* Position 0 is never LMS: count it in slot 0. */
    buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++;
}
#if defined(_OPENMP)
/* Compacted variant of libsais_count_lms_suffixes_32s_2k: symbols may carry a
   negative mark in the sign bit, so each symbol is stripped with & SAINT_MAX
   before being used as a bucket index. Slot 1 counts LMS positions, slot 0
   all others. Clears buckets first. */
static void libsais_count_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    sa_sint_t i = n - 2;
    fast_uint_t s = 1;
    fast_sint_t c0 = T[n - 1];
    fast_sint_t c1 = 0;

    for (; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&T[i - 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]);
        libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;

        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
    }

    c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++;
}
#endif
/* Fused counting + gathering pass over T[omp_block_start ..
   omp_block_start + omp_block_size): tallies every position into
   buckets[BUCKETS_INDEX4(symbol, type-pair)] (buckets is cleared first) while
   simultaneously writing LMS suffix positions into SA, filled downwards from
   the block's last index. Uses the same (s & 3) == 1 LMS state machine as
   libsais_gather_lms_suffixes_8u. Returns the number of LMS suffixes found. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;

    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 128;

        /* Classify the block's last position by scanning past equal symbols. */
        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
        while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - prefetch_distance]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;

            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;

            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;

            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        for (j -= 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        /* Final step: T[i] with i == omp_block_start - 1, or the -1 sentinel
           before the start of T. */
        c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
        buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
/* Parallel driver for libsais_count_and_gather_lms_suffixes_8u. Each thread
   counts and gathers its 16-aligned chunk into thread-local buckets; the
   master then (a) compacts the per-thread LMS runs to the tail of SA and
   (b) folds per-thread bucket counts into the global "buckets" while leaving
   each thread's array holding the running total of the later threads' counts
   (an exclusive offset base for subsequent placement). Returns the total
   number of LMS suffixes. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t m = 0;

#if defined(_OPENMP)
    #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;

        if (omp_num_threads == 1)
        {
            m = libsais_count_and_gather_lms_suffixes_8u(T, SA, n, buckets, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Per-thread pass into thread-local buckets; remember where the
                   chunk's gathered run ends and its last LMS suffix. */
                thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
                thread_state[omp_thread_num].state.m = libsais_count_and_gather_lms_suffixes_8u(T, SA, n, thread_state[omp_thread_num].state.buckets, omp_block_start, omp_block_size);

                if (thread_state[omp_thread_num].state.m > 0)
                {
                    thread_state[omp_thread_num].state.last_lms_suffix = SA[thread_state[omp_thread_num].state.position - 1];
                }
            }

            #pragma omp barrier

            #pragma omp master
            {
                memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));

                fast_sint_t t;
                for (t = omp_num_threads - 1; t >= 0; --t)
                {
                    m += (sa_sint_t)thread_state[t].state.m;

                    /* Move each earlier thread's LMS run to the tail of SA;
                       the last thread's run is already in place. */
                    if (t != omp_num_threads - 1 && thread_state[t].state.m > 0)
                    {
                        memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.m], (size_t)thread_state[t].state.m * sizeof(sa_sint_t));
                    }

                    {
                        /* Fold counts into the global buckets; A (the total of
                           threads after t) is written back as thread t's base. */
                        sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
                        fast_sint_t s; for (s = 0; s < 4 * ALPHABET_SIZE; s += 1) { sa_sint_t A = buckets[s], B = temp_bucket[s]; buckets[s] = A + B; temp_bucket[s] = A; }
                    }
                }
            }
        }
#endif
    }

    return m;
}
/* 32-bit-symbol fused counting + gathering pass (4 slots per symbol):
   tallies every position of the block into buckets[BUCKETS_INDEX4(symbol,
   type-pair)] while writing LMS positions into SA, filled downwards from the
   block's last index. buckets (4 * k entries) is cleared first. Returns the
   number of LMS suffixes found. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;

    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 32;

        /* Classify the block's last position by scanning past equal symbols. */
        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
        while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - 2 * prefetch_distance]);

            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;

            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;

            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;

            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        for (j -= prefetch_distance + 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++;
        }

        /* Final step: symbol before the block, or the -1 sentinel at i == -1. */
        c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
        buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
/* 32-bit-symbol fused counting + gathering pass (2 slots per symbol):
   slot 1 counts LMS positions, slot 0 all others, while LMS positions are
   written into SA, filled downwards from the block's last index. buckets
   (2 * k entries) is cleared first. Returns the number of LMS suffixes found. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;

    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 32;

        /* Classify the block's last position by scanning past equal symbols. */
        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
        while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - 2 * prefetch_distance]);

            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;

            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        for (j -= prefetch_distance + 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
            buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        /* Final step: symbol before the block, or the -1 sentinel at i == -1. */
        c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1);
        buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
/* Compacted variant of libsais_count_and_gather_lms_suffixes_32s_2k: symbols
   may carry a negative mark in the sign bit. Bucket indices use the stripped
   symbol (& SAINT_MAX); the gather acceptance test is
   (s & 3) == (symbol >= 0), which reduces to the usual LMS test for unmarked
   symbols and skips LMS positions following marked ones. buckets (2 * k
   entries) is cleared first. Returns the number of suffixes gathered. */
static sa_sint_t libsais_count_and_gather_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));

    fast_sint_t m = omp_block_start + omp_block_size - 1;

    if (omp_block_size > 0)
    {
        const fast_sint_t prefetch_distance = 32;

        /* Classify the block's last position by scanning past equal symbols. */
        fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1;
        while (j < n && (c1 = T[j]) == c0) { ++j; }
        fast_uint_t s = c0 >= c1;

        for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
        {
            libsais_prefetch(&T[i - 2 * prefetch_distance]);

            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]);
            libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]);

            c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
            c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

            c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
            c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;

            c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
            c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;

            c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
            c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        for (j -= prefetch_distance + 3; i >= j; i -= 1)
        {
            c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c1 >= 0));
            c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++;
        }

        /* Final step: symbol before the block, or the -1 sentinel at i == -1. */
        c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0));
        c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++;
    }

    return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m);
}
#if defined(_OPENMP)
/* Chooses the stride (in elements) between per-thread copies of the bucket
   table. Prefers the bucket size rounded up to a multiple of 1024, then to a
   multiple of 16 (presumably to reduce cache-line sharing between threads --
   NOTE(review): padding rationale inferred, confirm), falling back to the
   exact size when the free space cannot hold num_buckets - 1 padded copies. */
static fast_sint_t libsais_get_bucket_stride(fast_sint_t free_space, fast_sint_t bucket_size, fast_sint_t num_buckets)
{
    fast_sint_t space_per_copy = free_space / (num_buckets - 1);

    fast_sint_t padded_1024 = (bucket_size + 1023) & (-1024);
    if (space_per_copy >= padded_1024) { return padded_1024; }

    fast_sint_t padded_16 = (bucket_size + 15) & (-16);
    if (space_per_copy >= padded_16) { return padded_16; }

    return bucket_size;
}
/* Parallel count-and-gather of LMS suffixes using per-thread copies of the
   4k-entry bucket table ("fs" = the free space between &SA[n] and buckets
   holds the extra copies). Returns the total number of gathered LMS
   suffixes m; they end up packed at the tail of SA (in SA[n - m .. n)). */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Split T into 16-aligned slices; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
m = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
/* Each thread counts into its own bucket copy placed bucket_stride
   elements below the shared table, and gathers its slice's LMS
   suffixes at the tail of its slice of SA. */
fast_sint_t bucket_size = 4 * (fast_sint_t)k;
fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads);
{
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
}
#pragma omp barrier
/* The last thread concatenates the per-thread gathered runs into a
   single run at the tail of SA (its own run is already in place);
   meanwhile the other threads sum the per-thread bucket copies into
   the shared table. */
if (omp_thread_num == omp_num_threads - 1)
{
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
m += (sa_sint_t)thread_state[t].state.count;
if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
{
memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
}
}
else
{
/* Remaining omp_num_threads - 1 threads split the bucket table and
   accumulate all omp_num_threads + 1 copies (shared + per-thread). */
omp_num_threads = omp_num_threads - 1;
omp_block_stride = (bucket_size / omp_num_threads) & (-16);
omp_block_start = omp_thread_num * omp_block_stride;
omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;
libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1);
}
}
#endif
}
return m;
}
/* Same scheme as the 4k variant above, but with a 2k-entry bucket table:
   per-thread bucket copies in the free space below `buckets`, per-thread
   gather, then merge of the gathered runs by the last thread while the
   others accumulate the bucket copies. Returns the LMS suffix count m. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* 16-aligned slice of T per thread; last thread takes the remainder. */
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
m = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
fast_sint_t bucket_size = 2 * (fast_sint_t)k;
fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads);
{
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
}
#pragma omp barrier
/* Last thread merges the gathered runs to the tail of SA ... */
if (omp_thread_num == omp_num_threads - 1)
{
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
m += (sa_sint_t)thread_state[t].state.count;
if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
{
memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
}
}
else
{
/* ... while the other threads sum the bucket copies into `buckets`. */
omp_num_threads = omp_num_threads - 1;
omp_block_stride = (bucket_size / omp_num_threads) & (-16);
omp_block_start = omp_thread_num * omp_block_stride;
omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;
libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1);
}
}
#endif
}
return m;
}
/* "Compacted" variant of the 2k count-and-gather: threads gather into the
   upper half SA[n..2n) first, then every thread copies its own run into
   place in SA[0..n) (no designated merger thread), and all threads
   participate in accumulating the bucket copies. Returns nothing; the
   caller reads the counts from `buckets`. */
static void libsais_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
/* Free space here is measured from &SA[n + n]: the scratch gather
   area occupies SA[n..2n). */
fast_sint_t bucket_size = 2 * (fast_sint_t)k;
fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n + n], bucket_size, omp_num_threads);
{
thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size;
thread_state[omp_thread_num].state.count = libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA + n, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size);
}
#pragma omp barrier
{
/* m = total count of this thread's run plus all runs after it, which
   is exactly this run's offset from the end of SA[0..n). */
fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) { m += (sa_sint_t)thread_state[t].state.count; }
if (thread_state[omp_thread_num].state.count > 0)
{
memcpy(&SA[n - m], &SA[n + thread_state[omp_thread_num].state.position - thread_state[omp_thread_num].state.count], (size_t)thread_state[omp_thread_num].state.count * sizeof(sa_sint_t));
}
}
{
/* All threads split the table and sum the omp_num_threads copies. */
omp_block_stride = (bucket_size / omp_num_threads) & (-16);
omp_block_start = omp_thread_num * omp_block_stride;
omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start;
libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads);
}
}
#endif
}
}
#endif
/* Count-and-gather without extra bucket free space ("nofs"): at most two
   threads, one counting into `buckets` and the other gathering LMS suffixes
   into SA. Falls back to the fused single-threaded routine when the
   parallel region runs with one thread. Returns the LMS suffix count m. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
m = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, 0, n);
}
#if defined(_OPENMP)
else if (omp_thread_num == 0)
{
/* Thread 0 only counts; thread 1 only gathers (and produces m). */
libsais_count_lms_suffixes_32s_4k(T, n, k, buckets);
}
else
{
m = libsais_gather_lms_suffixes_32s(T, SA, n);
}
#endif
}
return m;
}
/* 2k-table twin of the 4k "nofs" routine above: two threads at most, one
   counting, one gathering; single-threaded fused path otherwise. Returns
   the LMS suffix count m. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
m = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
}
#if defined(_OPENMP)
else if (omp_thread_num == 0)
{
/* Thread 0 counts; thread 1 gathers and produces m. */
libsais_count_lms_suffixes_32s_2k(T, n, k, buckets);
}
else
{
m = libsais_gather_lms_suffixes_32s(T, SA, n);
}
#endif
}
return m;
}
/* Compacted 2k "nofs" variant: two threads at most, one running the
   compacted counting pass, the other the compacted gather. Returns the
   gathered LMS suffix count m. */
static sa_sint_t libsais_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_sint_t m = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
m = libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
}
#if defined(_OPENMP)
else if (omp_thread_num == 0)
{
/* Thread 0 counts; thread 1 gathers and produces m. */
libsais_count_compacted_lms_suffixes_32s_2k(T, n, k, buckets);
}
else
{
m = libsais_gather_compacted_lms_suffixes_32s(T, SA, n);
}
#endif
}
return m;
}
/* Dispatcher for the 4k count-and-gather: picks the free-space ("fs")
   multi-bucket strategy when the gap between &SA[n] and `buckets` can hold
   enough 16-aligned bucket copies and the input is large relative to the
   alphabet, otherwise the two-thread "nofs" strategy. Returns m. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m;
#if defined(_OPENMP)
/* max_threads = number of 16-aligned 4k-sized bucket copies that fit in
   the free space, capped by the requested thread count. */
sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((4 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
if (max_threads > 1 && n >= 65536 && n / k >= 2)
{
if (max_threads > n / 16 / k) { max_threads = n / 16 / k; }
m = libsais_count_and_gather_lms_suffixes_32s_4k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
}
else
#else
UNUSED(thread_state);
#endif
{
m = libsais_count_and_gather_lms_suffixes_32s_4k_nofs_omp(T, SA, n, k, buckets, threads);
}
return m;
}
/* Dispatcher for the 2k count-and-gather: same strategy selection as the 4k
   dispatcher above, but sized for 2k-entry bucket copies. Returns m. */
static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t m;
#if defined(_OPENMP)
/* How many 16-aligned 2k-sized copies fit in the free space. */
sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
if (max_threads > 1 && n >= 65536 && n / k >= 2)
{
if (max_threads > n / 8 / k) { max_threads = n / 8 / k; }
m = libsais_count_and_gather_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
}
else
#else
UNUSED(thread_state);
#endif
{
m = libsais_count_and_gather_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads);
}
return m;
}
/* Dispatcher for the compacted 2k count-and-gather; free space is measured
   from &SA[n + n] because the fs variant uses SA[n..2n) as scratch. */
static void libsais_count_and_gather_compacted_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n + n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; }
if (max_threads > 1 && n >= 65536 && n / k >= 2)
{
if (max_threads > n / 8 / k) { max_threads = n / 8 / k; }
libsais_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state);
}
else
#else
UNUSED(thread_state);
#endif
{
libsais_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads);
}
}
/* Symbol histogram: zeroes buckets[0..k) and then sets buckets[c] to the
   number of occurrences of symbol c in T[0..n). The main loop handles eight
   symbols per iteration with a read prefetch running ahead of it. */
static void libsais_count_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    memset(buckets, 0, (size_t)k * sizeof(sa_sint_t));

    fast_sint_t pos = 0;
    fast_sint_t limit = (fast_sint_t)n - 7;

    while (pos < limit)
    {
        libsais_prefetch(&T[pos + prefetch_distance]);

        buckets[T[pos + 0]]++; buckets[T[pos + 1]]++;
        buckets[T[pos + 2]]++; buckets[T[pos + 3]]++;
        buckets[T[pos + 4]]++; buckets[T[pos + 5]]++;
        buckets[T[pos + 6]]++; buckets[T[pos + 7]]++;

        pos += 8;
    }

    /* Up to seven trailing symbols. */
    for (limit += 7; pos < limit; pos += 1)
    {
        buckets[T[pos]]++;
    }
}
/* Derives per-symbol bucket [start, end) boundaries for the byte alphabet:
   for each symbol, sums its four sub-bucket counts (BUCKETS_INDEX4 slots
   0..3) and writes the running totals into the tables stored at
   buckets[6 * ALPHABET_SIZE] (starts) and buckets[7 * ALPHABET_SIZE] (ends). */
static void libsais_initialize_buckets_start_and_end_8u(sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[6 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE];

    fast_sint_t src, dst;
    sa_sint_t total = 0;
    for (src = BUCKETS_INDEX4(0, 0), dst = 0; src <= BUCKETS_INDEX4(UCHAR_MAX, 0); src += BUCKETS_INDEX4(1, 0), dst += 1)
    {
        bucket_start[dst] = total;
        total += buckets[src + BUCKETS_INDEX4(0, 0)]
               + buckets[src + BUCKETS_INDEX4(0, 1)]
               + buckets[src + BUCKETS_INDEX4(0, 2)]
               + buckets[src + BUCKETS_INDEX4(0, 3)];
        bucket_end[dst] = total;
    }
}
/* 32-bit-alphabet version of the start/end derivation: sums the four
   sub-bucket counts of each of the k symbols and records the running totals
   into buckets[4 * k] (starts) and buckets[5 * k] (ends). */
static void libsais_initialize_buckets_start_and_end_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[4 * k];
    sa_sint_t * RESTRICT bucket_end = &buckets[5 * k];

    fast_sint_t src, dst;
    sa_sint_t total = 0;
    for (src = BUCKETS_INDEX4(0, 0), dst = 0; src <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); src += BUCKETS_INDEX4(1, 0), dst += 1)
    {
        bucket_start[dst] = total;
        total += buckets[src + BUCKETS_INDEX4(0, 0)]
               + buckets[src + BUCKETS_INDEX4(0, 1)]
               + buckets[src + BUCKETS_INDEX4(0, 2)]
               + buckets[src + BUCKETS_INDEX4(0, 3)];
        bucket_end[dst] = total;
    }
}
/* Two-sub-bucket version: sums the two BUCKETS_INDEX2 slots per symbol and
   writes running totals into buckets[2 * k] (starts) and buckets[3 * k]
   (ends). */
static void libsais_initialize_buckets_start_and_end_32s_4k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[2 * k];
    sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];

    fast_sint_t src, dst;
    sa_sint_t total = 0;
    for (src = BUCKETS_INDEX2(0, 0), dst = 0; src <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); src += BUCKETS_INDEX2(1, 0), dst += 1)
    {
        bucket_start[dst] = total;
        total += buckets[src + BUCKETS_INDEX2(0, 0)] + buckets[src + BUCKETS_INDEX2(0, 1)];
        bucket_end[dst] = total;
    }
}
/* In-place inclusive prefix sum over the combined per-symbol counts: the
   running total of (slot 0 + slot 1) is stored back into slot 0 of each
   symbol, turning the counts into bucket end positions. */
static void libsais_initialize_buckets_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t idx;
    sa_sint_t running = 0;
    for (idx = BUCKETS_INDEX2(0, 0); idx <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); idx += BUCKETS_INDEX2(1, 0))
    {
        running += buckets[idx + BUCKETS_INDEX2(0, 0)] + buckets[idx + BUCKETS_INDEX2(0, 1)];
        buckets[idx + BUCKETS_INDEX2(0, 0)] = running;
    }
}
/* Compacts the slot-0 values (one per symbol) into buckets[0..k), then
   builds a second copy shifted by one position in buckets[k..2k) with a
   leading zero — i.e. buckets[k + c] holds the previous symbol's value. */
static void libsais_initialize_buckets_start_and_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t src, dst;
    for (src = BUCKETS_INDEX2(0, 0), dst = 0; src <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); src += BUCKETS_INDEX2(1, 0), dst += 1)
    {
        buckets[dst] = buckets[src];
    }

    buckets[k] = 0;
    memcpy(&buckets[k + 1], buckets, ((size_t)k - 1) * sizeof(sa_sint_t));
}
/* In-place exclusive prefix sum: buckets[c] becomes the total count of all
   symbols preceding c (i.e. the start position of symbol c's bucket). */
static void libsais_initialize_buckets_start_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t c;
    sa_sint_t total = 0;
    for (c = 0; c <= (fast_sint_t)k - 1; c += 1)
    {
        sa_sint_t count = buckets[c];
        buckets[c] = total;
        total += count;
    }
}
/* In-place inclusive prefix sum: buckets[c] becomes the total count of all
   symbols up to and including c (the end position of symbol c's bucket). */
static void libsais_initialize_buckets_end_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    fast_sint_t c;
    sa_sint_t total = 0;
    for (c = 0; c <= (fast_sint_t)k - 1; c += 1)
    {
        total += buckets[c];
        buckets[c] = total;
    }
}
/* Prepares bucket positions for the LMS radix sort over a byte alphabet.
   First pass walks T backwards from first_lms_suffix, replaying the suffix
   type state machine (s is a sliding 2-bit window; NOTE(review): this
   appears to be the SA-IS L/S-type classification — confirm against the
   counting pass) and decrements the matching sub-bucket counts. Second pass
   builds a 2-slot-per-symbol table at buckets[4 * ALPHABET_SIZE] from the
   sums of sub-buckets 1 and 3, returning the grand total. */
static sa_sint_t libsais_initialize_buckets_for_lms_suffixes_radix_sort_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
{
fast_uint_t s = 0;
fast_sint_t c0 = T[first_lms_suffix];
fast_sint_t c1 = 0;
for (; --first_lms_suffix >= 0; )
{
c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--;
}
/* Account for position 0 (no predecessor; state shifted once more). */
buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--;
}
{
sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE];
fast_sint_t i, j; sa_sint_t sum = 0;
for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(UCHAR_MAX, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
{
/* Slot 1 gets the exclusive running sum, slot 0 the inclusive one. */
temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum;
}
return sum;
}
}
/* Prepares 2-slot bucket positions for the 32-bit LMS radix sort: first
   moves the first LMS suffix's count from sub-bucket 1 to sub-bucket 0 for
   its symbol, then converts the counts in place to inclusive running sums —
   slot 0 accumulates both sub-buckets, slot 1 accumulates sub-bucket 1 only. */
static void libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
    buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++;
    buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--;

    fast_sint_t idx;
    sa_sint_t total_all = 0, total_ones = 0;
    for (idx = BUCKETS_INDEX2(0, 0); idx <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); idx += BUCKETS_INDEX2(1, 0))
    {
        total_all += buckets[idx + BUCKETS_INDEX2(0, 0)] + buckets[idx + BUCKETS_INDEX2(0, 1)];
        total_ones += buckets[idx + BUCKETS_INDEX2(0, 1)];
        buckets[idx + BUCKETS_INDEX2(0, 0)] = total_all;
        buckets[idx + BUCKETS_INDEX2(0, 1)] = total_ones;
    }
}
/* 32-bit-alphabet counterpart of the 8u routine above: replays the suffix
   type state machine backwards from first_lms_suffix to decrement the
   affected sub-bucket counts, then builds a one-slot-per-symbol table of
   inclusive running sums of sub-buckets 1 and 3 at buckets[4 * k].
   Returns the grand total. */
static sa_sint_t libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
{
fast_uint_t s = 0;
fast_sint_t c0 = T[first_lms_suffix];
fast_sint_t c1 = 0;
for (; --first_lms_suffix >= 0; )
{
c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--;
}
/* Account for position 0 (no predecessor; state shifted once more). */
buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--;
}
{
sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];
fast_sint_t i, j; sa_sint_t sum = 0;
for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1)
{
sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum;
}
return sum;
}
}
/* Prepares buckets for the combined radix + partial sorting pass. After
   moving the first LMS suffix's count from sub-bucket 1 to sub-bucket 0,
   one sweep produces three tables: bucket_start[c] (exclusive sum of both
   sub-buckets), bucket_end[c] (inclusive sum of both), and — written back
   in place — slot 1 of each symbol holding the inclusive sum of sub-bucket
   1 counts only. Statement order inside the loop is significant:
   bucket_start is written before the totals are advanced. */
static void libsais_initialize_buckets_for_radix_and_partial_sorting_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix)
{
    sa_sint_t * RESTRICT bucket_start = &buckets[2 * k];
    sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];

    buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++;
    buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--;

    fast_sint_t src, dst;
    sa_sint_t total_ones = 0, total_all = 0;
    for (src = BUCKETS_INDEX2(0, 0), dst = 0; src <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); src += BUCKETS_INDEX2(1, 0), dst += 1)
    {
        bucket_start[dst] = total_all;
        total_ones += buckets[src + BUCKETS_INDEX2(0, 1)];
        total_all += buckets[src + BUCKETS_INDEX2(0, 0)] + buckets[src + BUCKETS_INDEX2(0, 1)];
        buckets[src + BUCKETS_INDEX2(0, 1)] = total_ones;
        bucket_end[dst] = total_all;
    }
}
/* Radix-scatter pass: walks SA[omp_block_start .. +omp_block_size) backwards
   and moves each stored suffix position p to SA[--induction_bucket[...]],
   keyed by its first byte T[p]. Unrolled four entries per iteration with
   prefetches on both SA and the text. */
static void libsais_radix_sort_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais_prefetch(&SA[i - 2 * prefetch_distance]);
libsais_prefetch(&T[SA[i - prefetch_distance - 0]]);
libsais_prefetch(&T[SA[i - prefetch_distance - 1]]);
libsais_prefetch(&T[SA[i - prefetch_distance - 2]]);
libsais_prefetch(&T[SA[i - prefetch_distance - 3]]);
sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0;
sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1;
sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2;
sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3;
}
/* Remaining entries, one at a time. */
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p;
}
}
/* Parallel driver for the 8u radix scatter. Single-threaded path sorts the
   m - 1 gathered LMS suffixes at the tail of SA directly. The parallel path
   gives each thread its own bucket table (rebased from the shared table and
   the per-thread counts in thread_state) and its own span of gathered
   suffixes, so all threads can scatter concurrently into disjoint ranges. */
static void libsais_radix_sort_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && m >= 65536 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_num_threads = 1;
#endif
if (omp_num_threads == 1)
{
libsais_radix_sort_lms_suffixes_8u(T, SA, &buckets[4 * ALPHABET_SIZE], (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1);
}
#if defined(_OPENMP)
else
{
{
/* Rebase this thread's private bucket positions: shared end positions
   minus this thread's own sub-bucket counts. */
sa_sint_t * RESTRICT src_bucket = &buckets[4 * ALPHABET_SIZE];
sa_sint_t * RESTRICT dst_bucket = thread_state[omp_thread_num].state.buckets;
fast_sint_t i, j;
for (i = BUCKETS_INDEX2(0, 0), j = BUCKETS_INDEX4(0, 1); i <= BUCKETS_INDEX2(UCHAR_MAX, 0); i += BUCKETS_INDEX2(1, 0), j += BUCKETS_INDEX4(1, 0))
{
dst_bucket[i] = src_bucket[i] - dst_bucket[j];
}
}
{
/* This thread's span of gathered suffixes sits state.m entries deep,
   counting from the end of SA across the later threads' spans. The
   very first gathered suffix (at omp_block_start == m) is excluded. */
fast_sint_t t, omp_block_start = 0, omp_block_size = thread_state[omp_thread_num].state.m;
for (t = omp_num_threads - 1; t >= omp_thread_num; --t) omp_block_start += thread_state[t].state.m;
if (omp_block_start == (fast_sint_t)m && omp_block_size > 0)
{
omp_block_start -= 1; omp_block_size -= 1;
}
libsais_radix_sort_lms_suffixes_8u(T, SA, thread_state[omp_thread_num].state.buckets, (fast_sint_t)n - omp_block_start, omp_block_size);
}
}
#endif
}
}
/* 32-bit radix scatter, one bucket slot per symbol: walks SA backwards and
   moves each suffix position p to SA[--induction_bucket[T[p]]]. Unrolled
   four per iteration with a deeper prefetch pipeline (text reads at
   2 * prefetch_distance, bucket writes at prefetch_distance). */
static void libsais_radix_sort_lms_suffixes_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4)
{
libsais_prefetch(&SA[i - 3 * prefetch_distance]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]);
libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 0]]]);
libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 1]]]);
libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 2]]]);
libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 3]]]);
sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[T[p0]]] = p0;
sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[T[p1]]] = p1;
sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[T[p2]]] = p2;
sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[T[p3]]] = p3;
}
/* Remaining entries, one at a time. */
for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[--induction_bucket[T[p]]] = p;
}
}
/* Same scatter as the 6k variant, but the bucket table has two slots per
   symbol and positions are taken from slot 0 (BUCKETS_INDEX2(c, 0)). */
static void libsais_radix_sort_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4)
{
libsais_prefetch(&SA[i - 3 * prefetch_distance]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 0]], 0)]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 1]], 0)]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 2]], 0)]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 3]], 0)]);
sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0;
sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1;
sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2;
sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3;
}
/* Remaining entries, one at a time. */
for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p;
}
}
#if defined(_OPENMP)
/* Gather phase of the blocked parallel radix sort: copies each SA entry and
   its first symbol into the per-thread cache as (index, symbol) pairs, so a
   later single-threaded pass can assign bucket positions. Unrolled four per
   iteration with prefetches on SA, T, and the cache being written. */
static void libsais_radix_sort_lms_suffixes_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais_prefetch(&SA[i + 2 * prefetch_distance]);
libsais_prefetch(&T[SA[i + prefetch_distance + 0]]);
libsais_prefetch(&T[SA[i + prefetch_distance + 1]]);
libsais_prefetch(&T[SA[i + prefetch_distance + 2]]);
libsais_prefetch(&T[SA[i + prefetch_distance + 3]]);
libsais_prefetchw(&cache[i + prefetch_distance]);
cache[i + 0].symbol = T[cache[i + 0].index = SA[i + 0]];
cache[i + 1].symbol = T[cache[i + 1].index = SA[i + 1]];
cache[i + 2].symbol = T[cache[i + 2].index = SA[i + 2]];
cache[i + 3].symbol = T[cache[i + 3].index = SA[i + 3]];
}
/* Remaining entries, one at a time. */
for (j += prefetch_distance + 3; i < j; i += 1)
{
cache[i].symbol = T[cache[i].index = SA[i]];
}
}
/* Sort phase (run by a single thread between barriers): walks the cached
   (index, symbol) pairs backwards and replaces each cached symbol with its
   assigned destination slot, --induction_bucket[symbol]. The placement into
   SA happens afterwards in libsais_place_cached_suffixes. */
static void libsais_radix_sort_lms_suffixes_32s_6k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais_prefetchw(&cache[i - 2 * prefetch_distance]);
libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 0].symbol]);
libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 1].symbol]);
libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 2].symbol]);
libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 3].symbol]);
cache[i - 0].symbol = --induction_bucket[cache[i - 0].symbol];
cache[i - 1].symbol = --induction_bucket[cache[i - 1].symbol];
cache[i - 2].symbol = --induction_bucket[cache[i - 2].symbol];
cache[i - 3].symbol = --induction_bucket[cache[i - 3].symbol];
}
/* Remaining entries, one at a time. */
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
cache[i].symbol = --induction_bucket[cache[i].symbol];
}
}
/* 2-slot-per-symbol twin of the 6k block sort above: destination slots are
   taken from BUCKETS_INDEX2(symbol, 0) of the induction bucket table. */
static void libsais_radix_sort_lms_suffixes_32s_2k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4)
{
libsais_prefetchw(&cache[i - 2 * prefetch_distance]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 0].symbol, 0)]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 1].symbol, 0)]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 2].symbol, 0)]);
libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 3].symbol, 0)]);
cache[i - 0].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 0].symbol, 0)];
cache[i - 1].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 1].symbol, 0)];
cache[i - 2].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 2].symbol, 0)];
cache[i - 3].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 3].symbol, 0)];
}
/* Remaining entries, one at a time. */
for (j -= prefetch_distance + 3; i >= j; i -= 1)
{
cache[i].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i].symbol, 0)];
}
}
/* Blocked parallel radix sort for one block of SA (6k table): threads gather
   (index, symbol) pairs into the shared cache in parallel, a single thread
   assigns bucket positions (sequential, order-dependent), then threads place
   the results back into SA in parallel. Barriers separate the three phases. */
static void libsais_radix_sort_lms_suffixes_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* 16-aligned sub-slice of the block per thread; the cache is indexed by
   absolute SA position, hence the `cache - block_start` rebasing below. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
libsais_radix_sort_lms_suffixes_32s_6k_block_sort(induction_bucket, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
/* 2k-table twin of the blocked parallel radix sort above: parallel gather,
   single-threaded position assignment, parallel placement, separated by
   barriers. */
static void libsais_radix_sort_lms_suffixes_32s_2k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* 16-aligned sub-slice per thread; cache indexed by absolute SA position. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
libsais_radix_sort_lms_suffixes_32s_2k_block_sort(induction_bucket, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
/* Top-level driver for the 6k LMS radix sort: sequential scatter for small
   inputs, otherwise processes the m - 1 gathered suffixes at the tail of SA
   in cache-sized blocks (threads * LIBSAIS_PER_THREAD_CACHE_SIZE entries),
   each handled by the blocked parallel routine. */
static void libsais_radix_sort_lms_suffixes_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || m < 65536)
{
libsais_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1);
}
#if defined(_OPENMP)
else
{
/* block_start/block_end index the gathered suffixes from the end of SA,
   hence the (n - block_end) origin passed to the block routine. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; }
libsais_radix_sort_lms_suffixes_32s_6k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/* 2k-table twin of the 6k driver above: sequential scatter for small
   inputs, blocked parallel processing of the gathered suffixes otherwise. */
static void libsais_radix_sort_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || m < 65536)
{
libsais_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; }
libsais_radix_sort_lms_suffixes_32s_2k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/* Scans T right to left, detects LMS positions and radix-sorts them into SA
 * through the per-symbol counters in 'buckets' (each placement decrements the
 * bucket head, so buckets must hold end offsets on entry).  Returns the number
 * of suffixes placed. */
static sa_sint_t libsais_radix_sort_lms_suffixes_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t i = n - 2;
    sa_sint_t m = 0;            /* suffixes placed so far */
    fast_uint_t s = 1;          /* rolling window of suffix-type bits; bit 0 is the type of the most recently classified position */
    fast_sint_t c0 = T[n - 1];
    fast_sint_t c1 = 0;
    fast_sint_t c2 = 0;         /* symbol of the most recent placement (used by the final fixup) */

    /* Main loop, unrolled 4x with prefetching of upcoming text and bucket
     * slots.  (s & 3) == 1 means the position classified one step earlier is
     * LMS, hence the index stored is one past the position just examined. */
    for (; i >= prefetch_distance + 3; i -= 4)
    {
        libsais_prefetch(&T[i - 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[T[i - prefetch_distance - 0]]);
        libsais_prefetchw(&buckets[T[i - prefetch_distance - 1]]);
        libsais_prefetchw(&buckets[T[i - prefetch_distance - 2]]);
        libsais_prefetchw(&buckets[T[i - prefetch_distance - 3]]);

        c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i + 1; m++; }

        c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 0; m++; }

        c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1)));
        if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i - 1; m++; }

        c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 2; m++; }
    }

    /* Remainder loop: one position per iteration down to i == 0. */
    for (; i >= 0; i -= 1)
    {
        c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1)));
        if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i + 1; m++; }
    }

    /* NOTE(review): when more than one suffix was placed, the most recently
     * written slot (the current head of bucket c2) is overwritten with 0 -
     * presumably a sentinel for the next phase; confirm with the caller. */
    if (m > 1)
    {
        SA[buckets[c2]] = 0;
    }

    return m;
}
/* For every induction_bucket slot in [omp_block_start, omp_block_start +
 * omp_block_size), sets the sign bit (OR with SAINT_MIN) of the SA entry it
 * addresses.  Unrolled 4x with prefetching of the bucket array and the target
 * SA cache lines. */
static void libsais_radix_sort_set_markers_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetch(&induction_bucket[i + 2 * prefetch_distance]);

        libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 0]]);
        libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 1]]);
        libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 2]]);
        libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 3]]);

        SA[induction_bucket[i + 0]] |= SAINT_MIN;
        SA[induction_bucket[i + 1]] |= SAINT_MIN;
        SA[induction_bucket[i + 2]] |= SAINT_MIN;
        SA[induction_bucket[i + 3]] |= SAINT_MIN;
    }

    /* Remainder loop for the last (< 4 + prefetch_distance + 3) slots. */
    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        SA[induction_bucket[i]] |= SAINT_MIN;
    }
}
/* 4k-bucket counterpart of the marker pass: for each symbol index in
 * [omp_block_start, omp_block_start + omp_block_size), ORs
 * SUFFIX_GROUP_MARKER into the SA entry addressed by the (symbol, 0) slot of
 * the BUCKETS_INDEX2 layout.  Unrolled 4x with prefetching. */
static void libsais_radix_sort_set_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetch(&induction_bucket[BUCKETS_INDEX2(i + 2 * prefetch_distance, 0)]);

        libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 0, 0)]]);
        libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 1, 0)]]);
        libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 2, 0)]]);
        libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 3, 0)]]);

        SA[induction_bucket[BUCKETS_INDEX2(i + 0, 0)]] |= SUFFIX_GROUP_MARKER;
        SA[induction_bucket[BUCKETS_INDEX2(i + 1, 0)]] |= SUFFIX_GROUP_MARKER;
        SA[induction_bucket[BUCKETS_INDEX2(i + 2, 0)]] |= SUFFIX_GROUP_MARKER;
        SA[induction_bucket[BUCKETS_INDEX2(i + 3, 0)]] |= SUFFIX_GROUP_MARKER;
    }

    /* Remainder loop for the slots not covered by the unrolled loop. */
    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        SA[induction_bucket[BUCKETS_INDEX2(i, 0)]] |= SUFFIX_GROUP_MARKER;
    }
}
/* Parallel wrapper for the 6k marker pass: splits the first k - 1 bucket
 * slots into 16-aligned stripes, one per thread (the last thread takes the
 * remainder), and runs the serial kernel on each stripe. */
static void libsais_radix_sort_set_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t thread_idx = omp_get_thread_num();
        fast_sint_t thread_cnt = omp_get_num_threads();
        fast_sint_t stripe = (((fast_sint_t)k - 1) / thread_cnt) & (-16);

        fast_sint_t omp_block_start = thread_idx * stripe;
        fast_sint_t omp_block_size = (thread_idx == thread_cnt - 1) ? (fast_sint_t)k - 1 - omp_block_start : stripe;
#else
        UNUSED(threads);

        fast_sint_t omp_block_start = 0;
        fast_sint_t omp_block_size = (fast_sint_t)k - 1;
#endif
        libsais_radix_sort_set_markers_32s_6k(SA, induction_bucket, omp_block_start, omp_block_size);
    }
}
/* Parallel wrapper for the 4k marker pass; identical work partitioning to the
 * 6k wrapper: 16-aligned stripes per thread, remainder to the last thread. */
static void libsais_radix_sort_set_markers_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t thread_idx = omp_get_thread_num();
        fast_sint_t thread_cnt = omp_get_num_threads();
        fast_sint_t stripe = (((fast_sint_t)k - 1) / thread_cnt) & (-16);

        fast_sint_t omp_block_start = thread_idx * stripe;
        fast_sint_t omp_block_size = (thread_idx == thread_cnt - 1) ? (fast_sint_t)k - 1 - omp_block_start : stripe;
#else
        UNUSED(threads);

        fast_sint_t omp_block_start = 0;
        fast_sint_t omp_block_size = (fast_sint_t)k - 1;
#endif
        libsais_radix_sort_set_markers_32s_4k(SA, induction_bucket, omp_block_start, omp_block_size);
    }
}
/* Turns the per-symbol counts in 'buckets' (four sub-buckets per symbol,
 * BUCKETS_INDEX4 layout) into the start offsets used by the partial sort of
 * an 8-bit text.  Prefix sums are written both into temp_bucket (located at
 * offset 4 * ALPHABET_SIZE) and back over the counts in BUCKETS_INDEX2
 * layout.  sum0 starts one past left_suffixes_count, and the count of the
 * first LMS suffix's (symbol, 1) sub-bucket is bumped before summing.
 * NOTE(review): the four sub-buckets appear to be the SS/LS/SL/LL
 * suffix-type classes named explicitly in the 32s_6k variant - confirm. */
static void libsais_initialize_buckets_for_partial_sorting_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count)
{
    sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE];

    /* Account for the first LMS suffix in its (symbol, 1) sub-bucket. */
    buckets[BUCKETS_INDEX4((fast_uint_t)T[first_lms_suffix], 1)]++;

    /* sum0 accumulates sub-buckets 0 and 2; sum1 accumulates sub-bucket 1. */
    fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0;
    for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(UCHAR_MAX, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
    {
        temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0;

        sum0 += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 2)];
        sum1 += buckets[i + BUCKETS_INDEX4(0, 1)];

        buckets[j + BUCKETS_INDEX2(0, 0)] = sum0;
        buckets[j + BUCKETS_INDEX2(0, 1)] = sum1;
    }
}
/* 32-bit, 6k-bucket counterpart of the partial-sort bucket initialization.
 * Converts the per-symbol SS/LS/SL/LL counts (BUCKETS_INDEX4 layout) into
 * running offsets written in place, plus a pair of prefix sums written into
 * temp_bucket at offset 4 * k (BUCKETS_INDEX2 layout).  The loop is split at
 * the symbol of the first LMS suffix: after that symbol, sum1 is advanced by
 * one extra (sum1 += 1), which is the only difference between the two loop
 * bodies.  sum0 starts one past left_suffixes_count. */
static void libsais_initialize_buckets_for_partial_sorting_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count)
{
    sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];

    fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0, sum2 = 0;

    /* Symbols strictly below the first LMS suffix's symbol. */
    for (first_lms_suffix = T[first_lms_suffix], i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4((fast_sint_t)first_lms_suffix - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
    {
        sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)];
        sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)];
        sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)];
        sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)];

        /* Replace the counts with offsets; sub-buckets 2 and 3 are reset. */
        buckets[i + BUCKETS_INDEX4(0, 0)] = sum0;
        buckets[i + BUCKETS_INDEX4(0, 1)] = sum2;
        buckets[i + BUCKETS_INDEX4(0, 2)] = 0;
        buckets[i + BUCKETS_INDEX4(0, 3)] = 0;

        sum0 += SS + SL; sum1 += LS; sum2 += LS + LL;

        temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0;
        temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1;
    }

    /* Remaining symbols up to k - 1; sum1 gets one extra at the split point. */
    for (sum1 += 1; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0))
    {
        sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)];
        sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)];
        sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)];
        sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)];

        buckets[i + BUCKETS_INDEX4(0, 0)] = sum0;
        buckets[i + BUCKETS_INDEX4(0, 1)] = sum2;
        buckets[i + BUCKETS_INDEX4(0, 2)] = 0;
        buckets[i + BUCKETS_INDEX4(0, 3)] = 0;

        sum0 += SS + SL; sum1 += LS; sum2 += LS + LL;

        temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0;
        temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1;
    }
}
/* Left-to-right induction scan over SA[omp_block_start .. +omp_block_size)
 * for an 8-bit text.  Each entry p (its sign bit signals the start of a new
 * distinct-name group, counted by d) induces position p - 1 into the bucket
 * chosen by the preceding symbol pair; the stored value carries the sign bit
 * when that bucket sees a new group (distinct_names[v] != d).  Returns the
 * updated group counter d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    /* Shared bucket layout: heads at 4 * ALPHABET_SIZE, names at 2 * ALPHABET_SIZE. */
    sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

    /* Main loop, unrolled 2x; prefetches SA ahead plus the two text bytes
     * each upcoming entry will read. */
    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetch(&SA[i + 2 * prefetch_distance]);

        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);

        sa_sint_t p0 = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]);
        SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;

        sa_sint_t p1 = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]);
        SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]);
        SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
    }

    return d;
}
#if defined(_OPENMP)
/* First (counting) phase of the threaded left-to-right scan: records every
 * SA entry of the block into the thread-local cache (index + target bucket
 * symbol) and counts symbols into the thread-local buckets, without writing
 * SA.  Note the thread-local heads live at offset 0, unlike the shared
 * layout's offset 4 * ALPHABET_SIZE.  The running d (minus its starting 1)
 * and the entry count are saved in the thread state for the merge step. */
static void libsais_partial_sorting_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

    /* Fresh thread-local counters for this block. */
    memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));

    /* Main loop, unrolled 2x with prefetching (same pattern as the serial scan). */
    fast_sint_t i, j, count = 0; sa_sint_t d = 1;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetch(&SA[i + 2 * prefetch_distance]);

        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);

        sa_sint_t p0 = cache[count].index = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d;
        sa_sint_t p1 = cache[count].index = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d;
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); induction_bucket[v]++; distinct_names[v] = d;
    }

    /* Hand the per-thread totals to the master merge step. */
    state[0].state.position = (fast_sint_t)d - 1;
    state[0].state.count = count;
}
/* Second (placement) phase of the threaded left-to-right scan: replays the
 * 'count' cached entries, writing each induced position into SA at the
 * thread's (already rebased) bucket heads, with the same distinct-name sign
 * bit logic as the serial scan.  'd' is this thread's rebased group counter
 * computed by the master merge step. */
static void libsais_partial_sorting_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

    /* Main loop, unrolled 2x over the cache. */
    fast_sint_t i, j;
    for (i = 0, j = count - 1; i < j; i += 2)
    {
        libsais_prefetch(&cache[i + prefetch_distance]);

        sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol;
        SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;

        sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol;
        SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
    }

    /* Remainder: at most one trailing entry. */
    for (j += 1; i < j; i += 1)
    {
        sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol;
        SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
    }
}
/* Threaded left-to-right scan of one block: each thread counts its stripe
 * into thread-local buckets (prepare), the master then merges the counters
 * into the shared buckets and rebases every thread's bucket heads and group
 * counter d, and finally each thread writes its cached entries (place).
 * Falls back to the serial scan when OpenMP gives a single thread.  Returns
 * the updated group counter d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);

        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        /* 16-aligned stripe per thread; last thread takes the remainder. */
        fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;

        omp_block_start += block_start;

        if (omp_num_threads == 1)
        {
            d = libsais_partial_sorting_scan_left_to_right_8u(T, SA, buckets, d, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            /* Phase 1: per-thread counting into thread-local buckets. */
            {
                libsais_partial_sorting_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]);
            }

            #pragma omp barrier

            /* Phase 2 (master only): merge per-thread counters into the
             * shared buckets, handing each thread its exclusive starting
             * heads, rebased distinct names, and rebased d. */
            #pragma omp master
            {
                sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE];
                sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

                fast_sint_t t;
                for (t = 0; t < omp_num_threads; ++t)
                {
                    sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE];
                    sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE];

                    fast_sint_t c;
                    /* Exclusive prefix sum of bucket heads across threads. */
                    for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A + B; temp_induction_bucket[c] = A; }

                    /* Rebase this thread's distinct names onto the global d. */
                    for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; }

                    d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position;
                }
            }

            #pragma omp barrier

            /* Phase 3: each thread writes its cached entries into SA. */
            {
                libsais_partial_sorting_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position);
            }
        }
#endif
    }

    return d;
}
#endif
/* Top-level left-to-right induction for 8-bit text.  Seeds the scan with the
 * last suffix (n - 1, sign bit set, fresh distinct name), then runs either
 * the serial kernel or a parallel sweep.  In the parallel sweep, zero SA
 * entries delimit work: runs of non-zero entries are processed as blocks,
 * small blocks (< 32 entries) inline, larger ones via the threaded block
 * routine.  Returns the updated group counter d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];

    /* Seed: induce the suffix starting at n - 1 with a new distinct name. */
    SA[induction_bucket[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN;
    distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])] = ++d;

    if (threads == 1 || left_suffixes_count < 65536)
    {
        d = libsais_partial_sorting_scan_left_to_right_8u(T, SA, buckets, d, 0, left_suffixes_count);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start;
        for (block_start = 0; block_start < left_suffixes_count; )
        {
            if (SA[block_start] == 0)
            {
                /* Empty slot: nothing to induce here yet. */
                block_start++;
            }
            else
            {
                /* Extend the block across non-zero entries, capped so all
                 * threads' caches can hold it. */
                fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > left_suffixes_count) { block_max_end = left_suffixes_count;}
                fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
                fast_sint_t block_size = block_end - block_start;

                if (block_size < 32)
                {
                    /* Tiny block: process inline, same logic as the serial scan. */
                    for (; block_start < block_end; block_start += 1)
                    {
                        sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]);
                        SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
                    }
                }
                else
                {
                    d = libsais_partial_sorting_scan_left_to_right_8u_block_omp(T, SA, buckets, d, block_start, block_size, threads, thread_state);
                    block_start = block_end;
                }
            }
        }
    }
#else
    UNUSED(thread_state);
#endif

    return d;
}
/* Left-to-right induction scan, 32-bit text, 6k-bucket layout: the bucket
 * head lives at buckets[v] and the distinct-name slot at buckets[2 + v]
 * within the same BUCKETS_INDEX4 group.  Deeper two-level prefetching than
 * the 8u variant (text at 2x distance, bucket slots at 1x distance).
 * Returns the updated group counter d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetch(&SA[i + 3 * prefetch_distance]);

        libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 2);
        libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 2);

        /* Prefetch the bucket group each near-future entry will touch; the
         * (p > 0) guard keeps the index in range for zero entries. */
        sa_sint_t p0 = SA[i + prefetch_distance + 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais_prefetchw(&buckets[v0]);
        sa_sint_t p1 = SA[i + prefetch_distance + 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais_prefetchw(&buckets[v1]);

        sa_sint_t p2 = SA[i + 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] >= T[p2 - 1]);
        SA[buckets[v2]++] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d;

        sa_sint_t p3 = SA[i + 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] >= T[p3 - 1]);
        SA[buckets[v3]++] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d;
    }

    /* Remainder loop. */
    for (j += 2 * prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]);
        SA[buckets[v]++] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
    }

    return d;
}
/* Left-to-right induction scan, 32-bit text, 4k-bucket layout (heads at
 * 2 * k, names at 0).  Only positive entries induce; each processed entry is
 * masked in place (and cleared to 0 once consumed).  d advances on entries
 * carrying SUFFIX_GROUP_MARKER (tested via the SUFFIX_GROUP_BIT - 1 shift),
 * and the stored value packs the L/S type into the sign bit plus a
 * new-group flag into the group-marker bit.  Returns the updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k];
    sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 3 * prefetch_distance]);

        /* Far prefetch: the text positions future entries will read (NULL
         * branch skips non-positive entries). */
        sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);

        /* Near prefetch: the bucket slots those entries will update. */
        sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts2]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); }
        sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts3]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); }

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX;
        if (p0 > 0)
        {
            SA[i + 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]);
            SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
        }

        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX;
        if (p1 > 0)
        {
            SA[i + 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]);
            SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
        }
    }

    /* Remainder loop. */
    for (j += 2 * prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX;
        if (p > 0)
        {
            SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]);
            SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
        }
    }

    return d;
}
/* Left-to-right induction scan, 32-bit text, single bucket array (1k).
 * Positive entries induce position p - 1 into the bucket of its preceding
 * symbol, with the L/S type packed into the sign bit; consumed entries are
 * cleared to 0, others just have their sign bit masked off.  No distinct-name
 * bookkeeping in this variant. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 3 * prefetch_distance]);

        /* Far prefetch of the text, near prefetch of bucket + second text byte. */
        sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
        sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); }
        sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); }

        sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); }
        sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); }
    }

    /* Remainder loop. */
    for (j += 2 * prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); }
    }
}
#if defined(_OPENMP)
/* Gather phase of the threaded 6k scan: snapshots each SA entry of the
 * stripe into the cache as (index, bucket symbol).  Zero entries get symbol
 * 0; SA itself is not modified here. */
static void libsais_partial_sorting_scan_left_to_right_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetch(&SA[i + 2 * prefetch_distance]);

        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
        libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);

        libsais_prefetchw(&cache[i + prefetch_distance]);

        sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); } cache[i + 0].symbol = symbol0;
        sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); } cache[i + 1].symbol = symbol1;
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); } cache[i].symbol = symbol;
    }
}
/* Gather phase of the threaded 4k scan: positive SA entries are cached as
 * (original index, BUCKETS_INDEX2 symbol) and cleared in SA; non-positive
 * entries keep their masked value in SA and are cached with symbol
 * SAINT_MIN so the sort phase skips them. */
static void libsais_partial_sorting_scan_left_to_right_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);

        libsais_prefetchw(&cache[i + prefetch_distance]);

        sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX;
        sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX;
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX;
    }
}
/* Gather phase of the threaded 1k scan: positive SA entries are cached with
 * the fully formed induced value ((p - 1) plus L/S sign bit) as index and
 * T[p - 1] as symbol, then cleared in SA; non-positive entries are cached
 * with symbol SAINT_MIN and keep their masked value in SA. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&SA[i + 2 * prefetch_distance]);

        sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
        sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);

        libsais_prefetchw(&cache[i + prefetch_distance]);

        sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX;
        sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX;
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; }
    }
}
/* Sort phase of the threaded 6k scan, run over the cache only: for each
 * cached entry, claims a destination slot from its bucket (symbol field is
 * repurposed to hold that slot), computes the induced value with the same
 * distinct-name logic as the serial scan, and - when the destination falls
 * inside this same block - forwards the induced entry directly into the
 * cache so it is processed in this pass.  Returns the updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
    for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&cache[i + 2 * prefetch_distance]);

        libsais_prefetchw(&buckets[cache[i + prefetch_distance + 0].symbol]);
        libsais_prefetchw(&buckets[cache[i + prefetch_distance + 1].symbol]);

        sa_sint_t v0 = cache[i + 0].symbol, p0 = cache[i + 0].index; d += (p0 < 0); cache[i + 0].symbol = buckets[v0]++; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d;
        if (cache[i + 0].symbol < omp_block_end) { sa_sint_t s = cache[i + 0].symbol, q = (cache[s].index = cache[i + 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); }

        sa_sint_t v1 = cache[i + 1].symbol, p1 = cache[i + 1].index; d += (p1 < 0); cache[i + 1].symbol = buckets[v1]++; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d;
        if (cache[i + 1].symbol < omp_block_end) { sa_sint_t s = cache[i + 1].symbol, q = (cache[s].index = cache[i + 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); }
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = buckets[v]++; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
        if (cache[i].symbol < omp_block_end) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); }
    }

    return d;
}
/* Sort phase of the threaded 4k scan: processes only cached entries whose
 * symbol is non-negative (gather stored SAINT_MIN for skipped entries).
 * Each entry claims a slot from induction_bucket[v >> 1] and packs the L/S
 * bit (low bit of v) plus the new-group flag into the induced value; when
 * the destination lies inside this block, the induced entry is re-gathered
 * into the cache inline so it is handled in this pass.  Returns updated d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k];
    sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];

    fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
    for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
    {
        libsais_prefetchw(&cache[i + 2 * prefetch_distance]);

        /* Prefetch both bucket arrays for upcoming entries (skipped ones have
         * negative symbols and prefetch NULL). */
        sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais_prefetchw(s0 >= 0 ? Ds0 : NULL);
        sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais_prefetchw(s1 >= 0 ? Ds1 : NULL);

        sa_sint_t v0 = cache[i + 0].symbol;
        if (v0 >= 0)
        {
            sa_sint_t p0 = cache[i + 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 0].symbol = induction_bucket[v0 >> 1]++; cache[i + 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
            if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 0].index = np & SAINT_MAX; }
        }

        sa_sint_t v1 = cache[i + 1].symbol;
        if (v1 >= 0)
        {
            sa_sint_t p1 = cache[i + 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 1].symbol = induction_bucket[v1 >> 1]++; cache[i + 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
            if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 1].index = np & SAINT_MAX; }
        }
    }

    /* Remainder loop. */
    for (j += prefetch_distance + 1; i < j; i += 1)
    {
        sa_sint_t v = cache[i].symbol;
        if (v >= 0)
        {
            sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = induction_bucket[v >> 1]++; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
            if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i].index = np & SAINT_MAX; }
        }
    }

    return d;
}
/* Sort phase of the blocked left-to-right scan, 1k variant (single induction
   bucket array). Sequentially assigns SA positions to the suffixes gathered in
   cache[omp_block_start .. omp_block_start + omp_block_size): each entry with a
   non-negative symbol claims the next slot of its bucket (induction_bucket[v]++).
   If the claimed slot falls inside this same block, the destination cache entry
   is rewritten in place so the newly induced suffix (index np) is itself
   processed later in this pass; its cached index becomes the predecessor np - 1
   tagged with the L/S-type flag (T[np - 2] < T[np - 1]) in the sign bit, and its
   symbol becomes T[np - 1]. Entries with a negative symbol carry no suffix. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
/* Main loop, unrolled by 2; prefetches upcoming cache entries (for write) and
   the bucket counters they will touch. Negative symbols prefetch NULL (no-op). */
for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&cache[i + 2 * prefetch_distance]);
sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i + 0].symbol;
if (v0 >= 0)
{
cache[i + 0].symbol = induction_bucket[v0]++;
if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 0].index = np & SAINT_MAX; }
}
sa_sint_t v1 = cache[i + 1].symbol;
if (v1 >= 0)
{
cache[i + 1].symbol = induction_bucket[v1]++;
if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 1].index = np & SAINT_MAX; }
}
}
/* Tail loop for the last prefetch_distance + 1 entries (no unrolling). */
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = induction_bucket[v]++;
if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i].index = np & SAINT_MAX; }
}
}
}
/* Parallel driver for one block of the left-to-right 6k scan. With one thread
   (or a small block) it simply runs the serial scan. With multiple threads it
   runs three barrier-separated phases over the shared cache (indexed by absolute
   SA position via the `cache - block_start` bias):
     1. every thread gathers its slice of SA into the cache;
     2. the master thread alone replays the whole cached block against the
        buckets, sequentially updating the running name counter d;
     3. every thread places the cached results back into its slice of SA.
   Returns the updated d (written by master inside the region; d is shared). */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Split the block into equal 16-aligned slices; the last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais_partial_sorting_scan_left_to_right_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
d = libsais_partial_sorting_scan_left_to_right_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
/* Parallel driver for one block of the left-to-right 4k scan (alphabet size k).
   Same three-phase gather / master-sort / place structure as the 6k variant,
   but the final phase compacts the cache (skipping empty entries) while
   placing, via libsais_compact_and_place_cached_suffixes. Returns the name
   counter d updated by the master thread. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Equal 16-aligned slices per thread; last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais_partial_sorting_scan_left_to_right_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
d = libsais_partial_sorting_scan_left_to_right_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
/* Parallel driver for one block of the left-to-right 1k scan. Same gather /
   master-sort / place phase structure as the 4k/6k variants, but the 1k scan
   tracks no name counter, so nothing is returned. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
/* Equal 16-aligned slices per thread; last thread takes the remainder. */
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais_partial_sorting_scan_left_to_right_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
libsais_partial_sorting_scan_left_to_right_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
/* Top-level left-to-right 6k scan. Seeds the induction by placing suffix n-1
   into its bucket (keyed by T[n-1] and the type test T[n-2] >= T[n-1]) with the
   SAINT_MIN flag set, and records a fresh name (++d) in the bucket's name slot
   (offset +2). Then scans the first left_suffixes_count SA entries either
   serially (single thread or small input) or in cache-sized blocks through the
   block OMP driver, sharing thread_state[0]'s cache. Returns the final d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[buckets[BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN;
buckets[2 + BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])] = ++d;
if (threads == 1 || left_suffixes_count < 65536)
{
d = libsais_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, 0, left_suffixes_count);
}
#if defined(_OPENMP)
else
{
/* Process in blocks sized to the combined per-thread caches. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < left_suffixes_count; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > left_suffixes_count) { block_end = left_suffixes_count; }
d = libsais_partial_sorting_scan_left_to_right_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
/* Top-level left-to-right 4k scan. The buckets array is partitioned into
   induction counters at buckets[2k] and per-(symbol, type) distinct-name slots
   at buckets[0]. Seeds with suffix n-1: placed at its symbol's induction slot,
   tagged with the L/S flag in the sign bit and the SUFFIX_GROUP_MARKER, and a
   fresh name (++d) recorded. Then scans all n entries serially or in
   cache-sized blocks via the block OMP driver. Returns the final d. */
static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)) | SUFFIX_GROUP_MARKER;
distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] < T[n - 1])] = ++d;
if (threads == 1 || n < 65536)
{
d = libsais_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, 0, n);
}
#if defined(_OPENMP)
else
{
/* Process in blocks sized to the combined per-thread caches. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < n; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; }
d = libsais_partial_sorting_scan_left_to_right_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
/* Top-level left-to-right 1k scan (one bucket slot per symbol, no name
   counter). Seeds with suffix n-1 tagged with the L/S flag in the sign bit,
   then scans all n entries serially or in cache-sized blocks via the block OMP
   driver. */
static void libsais_partial_sorting_scan_left_to_right_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[buckets[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, 0, n);
}
#if defined(_OPENMP)
else
{
/* Process in blocks sized to the combined per-thread caches. */
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < n; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; }
libsais_partial_sorting_scan_left_to_right_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
/* Shifts the SAINT_MIN marker bits within each symbol bucket of SA by one
   position toward lower indices, scanning each bucket right to left. The
   running XOR state s holds the marker to apply to the current element while
   capturing that element's old marker for the next (lower) one; s is seeded
   set (SAINT_MIN) at the top of every bucket. Bucket [start, end) boundaries
   come from buckets[] / temp_bucket[] (temp counters live at offset
   4 * ALPHABET_SIZE). Buckets are independent, so the per-symbol loop is
   distributed across threads. */
static void libsais_partial_sorting_shift_markers_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, const sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
const fast_sint_t prefetch_distance = 32;
const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE];
fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536)
#else
UNUSED(threads); UNUSED(n);
#endif
for (c = BUCKETS_INDEX2(UCHAR_MAX, 0); c >= BUCKETS_INDEX2(1, 0); c -= BUCKETS_INDEX2(1, 0))
{
fast_sint_t i, j; sa_sint_t s = SAINT_MIN;
/* Unrolled-by-4 right-to-left pass over one bucket:
   q = old marker XOR incoming state; applying q both flips SA[i]'s marker
   to the incoming state and advances s to carry SA[i]'s old marker. */
for (i = (fast_sint_t)temp_bucket[c] - 1, j = (fast_sint_t)buckets[c - BUCKETS_INDEX2(1, 0)] + 3; i >= j; i -= 4)
{
libsais_prefetchw(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0;
sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1;
sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2;
sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3;
}
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q;
}
}
}
/* 32-bit/6k counterpart of the 8u marker shift: moves each SAINT_MIN marker
   bit one position toward lower indices within every symbol bucket, right to
   left, using the same XOR-state trick (s seeded set at the top of each
   bucket). Bucket boundaries are read from buckets[BUCKETS_INDEX4(c, 0)] and
   the temp counters at buckets[4 * k]. Buckets are independent, so the symbol
   loop is distributed across threads. */
static void libsais_partial_sorting_shift_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, const sa_sint_t * RESTRICT buckets, sa_sint_t threads)
{
const fast_sint_t prefetch_distance = 32;
const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k];
fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && k >= 65536)
#else
UNUSED(threads);
#endif
for (c = (fast_sint_t)k - 1; c >= 1; c -= 1)
{
fast_sint_t i, j; sa_sint_t s = SAINT_MIN;
/* Unrolled-by-4 right-to-left pass; q swaps SA[i]'s marker with state s. */
for (i = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 0)] - 1, j = (fast_sint_t)temp_bucket[BUCKETS_INDEX2(c - 1, 0)] + 3; i >= j; i -= 4)
{
libsais_prefetchw(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0;
sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1;
sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2;
sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3;
}
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q;
}
}
}
/* 4k variant of the marker shift: walks the whole SA[0 .. n) right to left and
   moves the SUFFIX_GROUP_MARKER bit one position toward lower indices using
   the same XOR-state trick. The extra mask ((p > 0) << (SUFFIX_GROUP_BIT - 1))
   makes q zero for non-positive entries, so those are left untouched and the
   carried state passes over them unchanged. State s starts set. */
static void libsais_partial_sorting_shift_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i; sa_sint_t s = SUFFIX_GROUP_MARKER;
for (i = (fast_sint_t)n - 1; i >= 3; i -= 4)
{
libsais_prefetchw(&SA[i - prefetch_distance]);
sa_sint_t p0 = SA[i - 0], q0 = ((p0 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p0 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q0; SA[i - 0] = p0 ^ q0;
sa_sint_t p1 = SA[i - 1], q1 = ((p1 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p1 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q1; SA[i - 1] = p1 ^ q1;
sa_sint_t p2 = SA[i - 2], q2 = ((p2 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p2 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q2; SA[i - 2] = p2 ^ q2;
sa_sint_t p3 = SA[i - 3], q3 = ((p3 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p3 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q3; SA[i - 3] = p3 ^ q3;
}
for (; i >= 0; i -= 1)
{
sa_sint_t p = SA[i], q = ((p & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q; SA[i] = p ^ q;
}
}
/* Copies the per-symbol counter pairs stored in the temporary area at
   buckets[4 * k] back into the interleaved 4-slots-per-symbol layout at the
   front of the buckets array (symbol c's pair lands at BUCKETS_INDEX4(c, 0/1),
   i.e. at twice its 2-slot offset). */
static void libsais_partial_sorting_shift_buckets_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets)
{
    sa_sint_t * RESTRICT src = &buckets[4 * k];
    fast_sint_t c;
    const fast_sint_t last = BUCKETS_INDEX2((fast_sint_t)k - 1, 0);

    for (c = BUCKETS_INDEX2(0, 0); c <= last; c += BUCKETS_INDEX2(1, 0))
    {
        buckets[2 * c + BUCKETS_INDEX4(0, 0)] = src[c + BUCKETS_INDEX2(0, 0)];
        buckets[2 * c + BUCKETS_INDEX4(0, 1)] = src[c + BUCKETS_INDEX2(0, 1)];
    }
}
/* Serial right-to-left scan for the 8-bit alphabet. Each SA entry p induces
   suffix p - 1 into the bucket keyed by (T[p-1], T[p-2] > T[p-1]), filling
   buckets from their tails (--induction_bucket[v]). The sign bit of p bumps
   the running name counter d; the placed entry gets its own sign bit set when
   this bucket has not yet seen name d (distinct_names[v] != d). Returns the
   updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t i, j;
/* Unrolled-by-2 main loop; prefetches SA ahead and the two text bytes
   (T[p-1], T[p-2]) each upcoming entry will read. */
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetch(&SA[i - 2 * prefetch_distance]);
libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2);
libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2);
sa_sint_t p0 = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]);
SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;
sa_sint_t p1 = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]);
SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
}
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]);
SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
return d;
}
#if defined(_OPENMP)
/* Prepare phase of the blocked right-to-left 8u scan (per thread): walks its
   SA slice right to left WITHOUT writing SA, recording each entry's raw index
   and bucket symbol into the thread cache, counting per-bucket occupancy in
   the thread-local induction_bucket (zeroed here), and tracking the last
   thread-local name per bucket in distinct_names. On exit,
   state->state.position holds the number of name increments seen in this
   slice (d started at 1, so d - 1) and state->state.count the number of
   cached entries; the master thread later merges these across threads. */
static void libsais_partial_sorting_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0; sa_sint_t d = 1;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetch(&SA[i - 2 * prefetch_distance]);
libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2);
libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2);
sa_sint_t p0 = cache[count].index = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d;
sa_sint_t p1 = cache[count].index = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d;
}
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); induction_bucket[v]++; distinct_names[v] = d;
}
state[0].state.position = (fast_sint_t)d - 1;
state[0].state.count = count;
}
/* Place phase of the blocked right-to-left 8u scan (per thread): replays the
   `count` cached (index, symbol) pairs in cache order, writing each induced
   suffix into SA at the tail of its bucket (--induction_bucket[v]) using this
   thread's pre-adjusted bucket counters and rebased name counter d. Same
   marker rule as the serial scan: sign bit set when distinct_names[v] != d. */
static void libsais_partial_sorting_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t i, j;
for (i = 0, j = count - 1; i < j; i += 2)
{
libsais_prefetch(&cache[i + prefetch_distance]);
sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol;
SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d;
sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol;
SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d;
}
for (j += 1; i < j; i += 1)
{
sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol;
SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
}
/* Parallel driver for one block of the right-to-left 8u scan. Single-thread
   path runs the serial scan. Multi-thread path (barrier-separated):
     1. each thread runs the prepare pass, counting bucket occupancy and
        caching entries into its own thread_state;
     2. the master walks threads from last to first (i.e. in scan order, since
        the scan is right-to-left), subtracting each thread's counts from the
        global bucket tails to derive that thread's starting offsets, and
        rebases each thread's name counter onto the global d (the
        distinct_names merge keeps a thread's last-written name when it wrote
        the bucket, otherwise the previous global value) — NOTE(review): this
        counter merge is subtle; the description above follows the visible
        arithmetic, verify against the serial scan's semantics;
     3. each thread places its cached entries using its adjusted state.
   Returns the merged name counter d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais_partial_sorting_scan_right_to_left_8u(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
libsais_partial_sorting_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]);
}
#pragma omp barrier
#pragma omp master
{
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE];
fast_sint_t c;
for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A - B; temp_induction_bucket[c] = A; }
for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; }
d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position;
}
}
#pragma omp barrier
{
libsais_partial_sorting_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position);
}
}
#endif
}
return d;
}
#endif
/* Top-level right-to-left 8u scan over SA[left_suffixes_count + 1,
   n - first_lms_suffix). Single-thread (or small range) path runs the serial
   scan over the whole range. The parallel path walks the range from the top,
   skipping zero SA entries, and carves off maximal runs of nonzero entries
   bounded by the combined per-thread cache capacity: runs shorter than 32
   entries are induced inline right here (same bucket/name logic as the serial
   scan); longer runs go through the block OMP driver. Blocks must end at a
   zero entry (or the capacity limit) because the blocked scan caches entries
   up front. */
static void libsais_partial_sorting_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1;
fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix;
if (threads == 1 || (scan_end - scan_start) < 65536)
{
libsais_partial_sorting_scan_right_to_left_8u(T, SA, buckets, d, scan_start, scan_end - scan_start);
}
#if defined(_OPENMP)
else
{
sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE];
sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE];
fast_sint_t block_start;
for (block_start = scan_end - 1; block_start >= scan_start; )
{
if (SA[block_start] == 0)
{
block_start--;
}
else
{
/* Find the block's lower bound: either a zero entry or the cache-capacity limit. */
fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < scan_start) { block_max_end = scan_start - 1; }
fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
fast_sint_t block_size = block_start - block_end;
if (block_size < 32)
{
/* Tiny run: induce inline, mirroring the serial scan body. */
for (; block_start > block_end; block_start -= 1)
{
sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]);
SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d;
}
}
else
{
d = libsais_partial_sorting_scan_right_to_left_8u_block_omp(T, SA, buckets, d, block_end + 1, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
/* Serial right-to-left 6k scan (32-bit alphabet, 4 bucket slots per symbol).
   Each SA entry p induces suffix p - 1 into the bucket keyed by
   BUCKETS_INDEX4(T[p-1], T[p-2] > T[p-1]), filling from the tail
   (--buckets[v]); the name counter d is bumped by p's sign bit, and slot
   buckets[2 + v] remembers the last name seen per bucket so the placed entry's
   sign bit marks a new name. Returns the updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
/* Two-stage prefetch: text at 2x distance, bucket counters at 1x distance
   (p - (p > 0) guards the p == 0 case for the speculative read). */
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetch(&SA[i - 3 * prefetch_distance]);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 2);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 2);
sa_sint_t p0 = SA[i - prefetch_distance - 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais_prefetchw(&buckets[v0]);
sa_sint_t p1 = SA[i - prefetch_distance - 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais_prefetchw(&buckets[v1]);
sa_sint_t p2 = SA[i - 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] > T[p2 - 1]);
SA[--buckets[v2]] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d;
sa_sint_t p3 = SA[i - 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] > T[p3 - 1]);
SA[--buckets[v3]] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d;
}
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]);
SA[--buckets[v]] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
}
return d;
}
/* Serial right-to-left 4k scan. Induction counters sit at buckets[3k],
   per-(symbol, type) distinct-name slots at buckets[0]. Only positive SA
   entries are processed, and each one is consumed (SA[i] = 0) after inducing
   its predecessor. The SUFFIX_GROUP_MARKER bit of p (extracted via the
   arithmetic shift p >> (SUFFIX_GROUP_BIT - 1)) bumps the name counter d; the
   placed entry combines the L/S flag (T[p-2] > T[p-1], sign bit) with a
   new-name flag in the SUFFIX_GROUP_MARKER position. Returns the updated d. */
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
fast_sint_t i, j;
/* Two-stage prefetch: text bytes at 2x distance, bucket/name counters at 1x. */
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 3 * prefetch_distance]);
sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts2]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); }
sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts3]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); }
sa_sint_t p0 = SA[i - 0];
if (p0 > 0)
{
SA[i - 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]);
SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
}
sa_sint_t p1 = SA[i - 1];
if (p1 > 0)
{
SA[i - 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]);
SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
}
}
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i];
if (p > 0)
{
SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]);
SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
}
}
return d;
}
/* Serial right-to-left 1k scan (no name tracking). Each positive SA entry p
   is consumed (SA[i] = 0) and induces suffix p - 1 into the tail of bucket
   T[p-1], with the L/S flag (T[p-2] > T[p-1]) in the sign bit. Non-positive
   entries are skipped. */
static void libsais_partial_sorting_scan_right_to_left_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
/* Two-stage prefetch: T[s-1] at 2x distance; bucket slot and T[s-2] at 1x. */
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 3 * prefetch_distance]);
sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); }
sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); }
}
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
/* Gather phase of the blocked right-to-left 6k scan (per thread): copies each
   SA entry in the slice into the cache (raw index preserved, including any
   sign bit) and precomputes its BUCKETS_INDEX4 bucket symbol from T; entries
   whose masked index is 0 get symbol 0. SA itself is left untouched — the
   master's sort phase works from the cache. */
static void libsais_partial_sorting_scan_right_to_left_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetch(&SA[i + 2 * prefetch_distance]);
libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2);
libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1);
libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2);
libsais_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0;
sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol;
}
}
/* Gather phase of the blocked right-to-left 4k scan (per thread): consumes
   each positive SA entry in the slice (SA[i] = 0), caching its raw index
   (with SUFFIX_GROUP_MARKER intact) and its BUCKETS_INDEX2 bucket symbol.
   Non-positive entries are marked empty with symbol SAINT_MIN and their
   cached index is left unset. */
static void libsais_partial_sorting_scan_right_to_left_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
libsais_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol;
}
}
/* Gather phase of the blocked right-to-left 1k scan (per thread): consumes
   each positive SA entry p (SA[i] = 0) and caches the fully-formed induced
   value — index (p - 1) tagged with the L/S flag (T[p-2] > T[p-1]) in the
   sign bit — together with its bucket symbol T[p - 1]. Non-positive entries
   are marked empty with symbol SAINT_MIN. */
static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
libsais_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; } cache[i + 1].symbol = symbol1;
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; } cache[i].symbol = symbol;
}
}
// Serial sort phase for a right-to-left 6k block (run by one thread while
// the others wait at a barrier): walks the cached entries backwards,
// decrements the bucket counter for each entry's symbol to obtain its
// induced SA position (stored back into cache[].symbol), and rebuilds the
// index with a marker bit derived from the per-bucket counter at
// buckets[2 + v] versus the running counter d (d counts sign-marked
// entries seen so far). If the induced position falls inside this block,
// the new entry is written straight back into the cache with its
// BUCKETS_INDEX4 symbol so it is induced again within the same pass.
// Returns the updated counter d so markers stay consistent across blocks.
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Main loop: unrolled x2, prefetching upcoming cache lines and the bucket
// entries the next iterations will modify.
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&cache[i - 2 * prefetch_distance]);
libsais_prefetchw(&buckets[cache[i - prefetch_distance - 0].symbol]);
libsais_prefetchw(&buckets[cache[i - prefetch_distance - 1].symbol]);
sa_sint_t v0 = cache[i - 0].symbol, p0 = cache[i - 0].index; d += (p0 < 0); cache[i - 0].symbol = --buckets[v0]; cache[i - 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d;
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t s = cache[i - 0].symbol, q = (cache[s].index = cache[i - 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
sa_sint_t v1 = cache[i - 1].symbol, p1 = cache[i - 1].index; d += (p1 < 0); cache[i - 1].symbol = --buckets[v1]; cache[i - 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d;
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t s = cache[i - 1].symbol, q = (cache[s].index = cache[i - 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
}
// Scalar tail covering the last prefetch_distance + 1 entries.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = --buckets[v]; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d;
if (cache[i].symbol >= omp_block_start) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); }
}
return d;
}
// Serial sort phase for a right-to-left 4k block: entries whose cached
// symbol is negative (sentinel SAINT_MIN from the gather phase) are
// skipped. For each valid entry the induction bucket (at buckets[3k]) for
// symbol v >> 1 is decremented to get the induced position, and the index
// is rebuilt with the type bit of v and a suffix-group marker derived from
// the distinct_names table (at buckets[0]) versus the running counter d.
// d itself advances by the marker bit extracted from the cached index
// (p >> (SUFFIX_GROUP_BIT - 1)). When the induced position lands inside
// this block, the entry is re-gathered into the cache (index moved, symbol
// recomputed via BUCKETS_INDEX2) so it is induced within the same pass.
// Returns the updated counter d.
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k];
sa_sint_t * RESTRICT distinct_names = &buckets[0 * k];
fast_sint_t i, j;
// Main loop: unrolled x2, prefetching the bucket entries upcoming
// iterations will write (skipped for sentinel symbols via NULL).
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&cache[i - 2 * prefetch_distance]);
sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais_prefetchw(s0 >= 0 ? Ds0 : NULL);
sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais_prefetchw(s1 >= 0 ? Ds1 : NULL);
sa_sint_t v0 = cache[i - 0].symbol;
if (v0 >= 0)
{
sa_sint_t p0 = cache[i - 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 0].symbol = --induction_bucket[v0 >> 1]; cache[i - 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d;
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
}
sa_sint_t v1 = cache[i - 1].symbol;
if (v1 >= 0)
{
sa_sint_t p1 = cache[i - 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 1].symbol = --induction_bucket[v1 >> 1]; cache[i - 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d;
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
}
}
// Scalar tail covering the last prefetch_distance + 1 entries.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = --induction_bucket[v >> 1]; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d;
if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } }
}
}
return d;
}
// Serial sort phase for a right-to-left 1k block: entries with a negative
// cached symbol (gather-phase sentinel) are skipped; for the rest the
// induction bucket for the symbol is decremented to get the induced SA
// position (stored in cache[].symbol). When that position falls inside
// this block and the cached index is positive, the predecessor is
// re-gathered into the cache in place — index packed as (np - 1) with the
// T[np-2] > T[np-1] flag in the sign bit, symbol set to T[np - 1] — so it
// is induced within the same pass. No distinct-name counter is maintained
// in the 1k variant.
static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Main loop: unrolled x2, prefetching the bucket entries the upcoming
// iterations will decrement (sentinels prefetch NULL).
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&cache[i - 2 * prefetch_distance]);
sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i - 0].symbol;
if (v0 >= 0)
{
cache[i - 0].symbol = --induction_bucket[v0];
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
}
sa_sint_t v1 = cache[i - 1].symbol;
if (v1 >= 0)
{
cache[i - 1].symbol = --induction_bucket[v1];
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; }}
}
}
// Scalar tail covering the last prefetch_distance + 1 entries.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = --induction_bucket[v];
if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } }
}
}
}
// Parallel driver for one cache-sized block of the right-to-left 6k scan.
// Pipeline (separated by barriers): all threads gather their SA sub-block
// into the shared cache; the master alone runs the inherently serial
// bucket-sort pass over the whole block; then all threads scatter the
// cached results back into SA. With a single thread (or when the parallel
// region is not taken) the plain serial scan is used instead. Returns the
// updated distinct-name counter d (only the master updates it; the value
// is read after the parallel region ends).
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Split the block into 16-aligned stripes; the last thread absorbs the
// remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (all threads): move SA entries into the shared cache.
// 'cache - block_start' lets the kernels index the cache by absolute
// SA position.
libsais_partial_sorting_scan_right_to_left_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): serial induction over the whole block.
d = libsais_partial_sorting_scan_right_to_left_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3 (all threads): write cached results back into SA.
libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
// Parallel driver for one cache-sized block of the right-to-left 4k scan.
// Same gather / master-sort / place pipeline as the 6k variant, except the
// final phase uses libsais_compact_and_place_cached_suffixes (the 4k sort
// phase can zero out consumed cache entries, so placement must compact).
// Returns the updated distinct-name counter d.
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned stripes per thread; last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
d = libsais_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (all threads): gather SA entries into the shared cache.
libsais_partial_sorting_scan_right_to_left_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): serial induction over the whole block.
d = libsais_partial_sorting_scan_right_to_left_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3 (all threads): compact and place cached results into SA.
libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
return d;
}
// Parallel driver for one cache-sized block of the right-to-left 1k scan:
// gather (all threads) -> serial sort (master) -> compact-and-place (all
// threads), with barriers between the phases. Falls back to the plain
// serial scan when the parallel region runs on a single thread. The 1k
// variant carries no distinct-name counter, so nothing is returned.
static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned stripes per thread; last thread takes the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (all threads): gather SA entries into the shared cache.
libsais_partial_sorting_scan_right_to_left_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (master only): serial induction over the whole block.
libsais_partial_sorting_scan_right_to_left_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3 (all threads): compact and place cached results into SA.
libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
// Top-level right-to-left partial-sorting scan for the 6k bucket layout.
// The scan covers SA positions [left_suffixes_count + 1, n - first_lms_suffix).
// Small inputs (or threads == 1) use the serial scan directly; otherwise
// the range is processed back-to-front in blocks sized to the per-thread
// caches, each handled by the block-parallel driver. Returns the updated
// distinct-name counter d.
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1;
fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix;
if (threads == 1 || (scan_end - scan_start) < 65536)
{
d = libsais_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, scan_start, scan_end - scan_start);
}
#if defined(_OPENMP)
else
{
// Walk the range right-to-left in blocks of at most
// threads * LIBSAIS_PER_THREAD_CACHE_SIZE entries (block_end is
// exclusive-below, hence the +1 when calling the block driver).
fast_sint_t block_start, block_end;
for (block_start = scan_end - 1; block_start >= scan_start; block_start = block_end)
{
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < scan_start) { block_end = scan_start - 1; }
d = libsais_partial_sorting_scan_right_to_left_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
// Top-level right-to-left partial-sorting scan for the 4k bucket layout,
// covering all of SA[0..n). Serial for small inputs or a single thread;
// otherwise processes cache-sized blocks back-to-front via the
// block-parallel driver. Returns the updated distinct-name counter d.
static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
d = libsais_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, 0, n);
}
#if defined(_OPENMP)
else
{
// Right-to-left block loop; block_end is exclusive-below (can be -1).
fast_sint_t block_start, block_end;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
{
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }
d = libsais_partial_sorting_scan_right_to_left_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
return d;
}
// Top-level right-to-left partial-sorting scan for the 1k bucket layout,
// covering all of SA[0..n). Serial for small inputs or a single thread;
// otherwise processes cache-sized blocks back-to-front via the
// block-parallel driver. The 1k variant has no distinct-name counter.
static void libsais_partial_sorting_scan_right_to_left_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
if (threads == 1 || n < 65536)
{
libsais_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, 0, n);
}
#if defined(_OPENMP)
else
{
// Right-to-left block loop; block_end is exclusive-below (can be -1).
fast_sint_t block_start, block_end;
for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end)
{
block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; }
libsais_partial_sorting_scan_right_to_left_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
// Compacts the sign-marked entries of SA[omp_block_start ..
// omp_block_start + omp_block_size) to the front of the same range
// (4k layout): each value is rewritten as
// (v - SUFFIX_GROUP_MARKER) & ~SUFFIX_GROUP_MARKER, and the write cursor
// advances only for entries whose sign bit is set — presumably the LMS
// suffixes flagged by the preceding induction pass. Returns the position
// one past the last gathered entry.
static fast_sint_t libsais_partial_sorting_gather_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t pos = omp_block_start;
    fast_sint_t dst = omp_block_start;
    fast_sint_t limit = omp_block_start + omp_block_size - 3;

    /* Unrolled x4 main pass; dst only advances on sign-marked entries, so
       unmarked writes are harmlessly overwritten by the next one. */
    while (pos < limit)
    {
        libsais_prefetch(&SA[pos + prefetch_distance]);

        sa_sint_t v;
        v = SA[pos + 0]; SA[dst] = (v - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v < 0);
        v = SA[pos + 1]; SA[dst] = (v - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v < 0);
        v = SA[pos + 2]; SA[dst] = (v - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v < 0);
        v = SA[pos + 3]; SA[dst] = (v - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v < 0);

        pos += 4;
    }

    /* Scalar tail: at most three remaining entries. */
    for (limit += 3; pos < limit; pos += 1)
    {
        sa_sint_t v = SA[pos]; SA[dst] = (v - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); dst += (v < 0);
    }

    return dst;
}
// Compacts the sign-marked entries of SA[omp_block_start ..
// omp_block_start + omp_block_size) to the front of the same range
// (1k layout): each value has its sign bit stripped (& SAINT_MAX), and the
// write cursor advances only when the original value was negative.
// Returns the position one past the last gathered entry.
static fast_sint_t libsais_partial_sorting_gather_lms_suffixes_32s_1k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t pos = omp_block_start;
    fast_sint_t dst = omp_block_start;
    fast_sint_t limit = omp_block_start + omp_block_size - 3;

    /* Unrolled x4 main pass; dst only advances on sign-marked entries, so
       unmarked writes are harmlessly overwritten by the next one. */
    while (pos < limit)
    {
        libsais_prefetch(&SA[pos + prefetch_distance]);

        sa_sint_t v;
        v = SA[pos + 0]; SA[dst] = v & SAINT_MAX; dst += (v < 0);
        v = SA[pos + 1]; SA[dst] = v & SAINT_MAX; dst += (v < 0);
        v = SA[pos + 2]; SA[dst] = v & SAINT_MAX; dst += (v < 0);
        v = SA[pos + 3]; SA[dst] = v & SAINT_MAX; dst += (v < 0);

        pos += 4;
    }

    /* Scalar tail: at most three remaining entries. */
    for (limit += 3; pos < limit; pos += 1)
    {
        sa_sint_t v = SA[pos]; SA[dst] = v & SAINT_MAX; dst += (v < 0);
    }

    return dst;
}
// Parallel wrapper for the 4k LMS-suffix gather: each thread compacts its
// own stripe of SA in place and records where its gathered run starts
// (position) and how long it is (count); the master then memmoves the
// per-thread runs together so the gathered entries form one contiguous
// prefix of SA. Serial path is used for a single thread.
static void libsais_partial_sorting_gather_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned stripes per thread; last thread takes the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Per-thread compaction; count = number of entries gathered in this
// stripe (gather returns one past the last written position).
thread_state[omp_thread_num].state.position = omp_block_start;
thread_state[omp_thread_num].state.count = libsais_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size) - omp_block_start;
}
#pragma omp barrier
#pragma omp master
{
// Stitch the per-thread runs into one contiguous prefix. Thread 0's
// run is already in place, so only t > 0 needs moving.
fast_sint_t t, position = 0;
for (t = 0; t < omp_num_threads; ++t)
{
if (t > 0 && thread_state[t].state.count > 0)
{
memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
position += thread_state[t].state.count;
}
}
}
#endif
}
}
// Parallel wrapper for the 1k LMS-suffix gather; identical structure to the
// 4k wrapper: per-thread in-place compaction of each stripe, then a master
// pass that memmoves the per-thread runs into one contiguous prefix of SA.
static void libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned stripes per thread; last thread takes the remainder.
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
if (omp_num_threads == 1)
{
libsais_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Per-thread compaction; count = entries gathered in this stripe.
thread_state[omp_thread_num].state.position = omp_block_start;
thread_state[omp_thread_num].state.count = libsais_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size) - omp_block_start;
}
#pragma omp barrier
#pragma omp master
{
// Stitch per-thread runs into one contiguous prefix (thread 0's run
// is already in place).
fast_sint_t t, position = 0;
for (t = 0; t < omp_num_threads; ++t)
{
if (t > 0 && thread_state[t].state.count > 0)
{
memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
position += thread_state[t].state.count;
}
}
}
#endif
}
}
// Induces the partial order of suffixes for 8-bit input: zeroes the
// temporary bucket area (two ALPHABET_SIZE segments starting at
// buckets[2 * ALPHABET_SIZE]), runs the left-to-right induction scan,
// shifts the group markers, then runs the right-to-left scan seeded with
// the distinct-name counter d produced by the first scan.
static void libsais_induce_partial_order_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(&buckets[2 * ALPHABET_SIZE], 0, 2 * ALPHABET_SIZE * sizeof(sa_sint_t));
sa_sint_t d = libsais_partial_sorting_scan_left_to_right_8u_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state);
libsais_partial_sorting_shift_markers_8u_omp(SA, n, buckets, threads);
libsais_partial_sorting_scan_right_to_left_8u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state);
}
// Induces the partial order for 32-bit input using the 6k bucket layout:
// left-to-right scan, marker shift, bucket shift (to re-base the buckets
// for the reverse direction), then the right-to-left scan carrying the
// distinct-name counter d from the first scan.
static void libsais_induce_partial_order_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t d = libsais_partial_sorting_scan_left_to_right_32s_6k_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state);
libsais_partial_sorting_shift_markers_32s_6k_omp(SA, k, buckets, threads);
libsais_partial_sorting_shift_buckets_32s_6k(k, buckets);
libsais_partial_sorting_scan_right_to_left_32s_6k_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state);
}
// Induces the partial order for 32-bit input using the 4k bucket layout:
// clears the first 2k bucket counters, runs the left-to-right scan, shifts
// the markers, runs the right-to-left scan with the carried counter d, and
// finally compacts the marked LMS suffixes to the front of SA.
static void libsais_induce_partial_order_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t));
sa_sint_t d = libsais_partial_sorting_scan_left_to_right_32s_4k_omp(T, SA, n, k, buckets, 0, threads, thread_state);
libsais_partial_sorting_shift_markers_32s_4k(SA, n);
libsais_partial_sorting_scan_right_to_left_32s_4k_omp(T, SA, n, k, buckets, d, threads, thread_state);
libsais_partial_sorting_gather_lms_suffixes_32s_4k_omp(SA, n, threads, thread_state);
}
// Induces the partial order for 32-bit input using the 2k bucket layout:
// the two k-sized halves of the bucket array serve the two scan
// directions (upper half for left-to-right, lower half for right-to-left),
// followed by LMS-suffix compaction.
static void libsais_induce_partial_order_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
libsais_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, &buckets[1 * k], threads, thread_state);
libsais_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, &buckets[0 * k], threads, thread_state);
libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state);
}
// Induces the partial order for 32-bit input with only k extra words of
// bucket space: the single bucket array is recounted and re-initialized
// before each scan (start offsets for the left-to-right pass, end offsets
// for the right-to-left pass), trading extra counting passes for memory.
// Ends with LMS-suffix compaction.
static void libsais_induce_partial_order_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
libsais_count_suffixes_32s(T, n, k, buckets);
libsais_initialize_buckets_start_32s_1k(k, buckets);
libsais_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, buckets, threads, thread_state);
libsais_count_suffixes_32s(T, n, k, buckets);
libsais_initialize_buckets_end_32s_1k(k, buckets);
libsais_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, buckets, threads, thread_state);
libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state);
}
// Assigns group names to the LMS suffixes stored in SA[omp_block_start ..
// omp_block_start + omp_block_size): for each entry p, the current name
// (tagged with SAINT_MIN) is written into the upper half of SA at index
// (p & SAINT_MAX) >> 1 (halving is safe because LMS positions cannot be
// adjacent); the name advances after every entry whose sign bit is set.
// 'name' is the starting name for this block (supports the parallel
// prefix-sum caller). Returns the name counter after the block.
static sa_sint_t libsais_renumber_lms_suffixes_8u(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
fast_sint_t i, j;
// Main loop: unrolled x4, prefetching both the source entries and the
// scattered destination slots in the upper half.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais_prefetch(&SA[i + 2 * prefetch_distance]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]);
sa_sint_t p0 = SA[i + 0]; SAm[(p0 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p0 < 0;
sa_sint_t p1 = SA[i + 1]; SAm[(p1 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p1 < 0;
sa_sint_t p2 = SA[i + 2]; SAm[(p2 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p2 < 0;
sa_sint_t p3 = SA[i + 3]; SAm[(p3 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p3 < 0;
}
// Scalar tail for the remaining entries.
for (j += prefetch_distance + 3; i < j; i += 1)
{
sa_sint_t p = SA[i]; SAm[(p & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p < 0;
}
return name;
}
// Scans the given block of the upper half of SA (offset by m) right to
// left and compacts the sign-marked entries — markers stripped via
// & SAINT_MAX — downward from position l - 1. The write cursor only
// advances past values actually marked, so unmarked writes are overwritten.
// Returns the lowest position written, i.e. the start of the gathered run
// that ends just below the original l.
static fast_sint_t libsais_gather_marked_suffixes_8u(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
l -= 1;
fast_sint_t i, j;
// Main loop: unrolled x4, scanning backwards with prefetch.
for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4)
{
libsais_prefetch(&SA[i - prefetch_distance]);
sa_sint_t s0 = SA[i - 0]; SA[l] = s0 & SAINT_MAX; l -= s0 < 0;
sa_sint_t s1 = SA[i - 1]; SA[l] = s1 & SAINT_MAX; l -= s1 < 0;
sa_sint_t s2 = SA[i - 2]; SA[l] = s2 & SAINT_MAX; l -= s2 < 0;
sa_sint_t s3 = SA[i - 3]; SA[l] = s3 & SAINT_MAX; l -= s3 < 0;
}
// Scalar tail for the first few entries of the block.
for (j -= 3; i >= j; i -= 1)
{
sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l -= s < 0;
}
l += 1;
return l;
}
// Parallel wrapper for LMS-suffix renumbering (8-bit input). In the
// parallel path each thread first counts the sign-marked entries in its
// stripe, then (after a barrier) derives its starting name as the prefix
// sum of earlier threads' counts and renumbers its stripe; the last thread
// publishes the grand total as the result. Serial path renumbers directly
// starting from 0. Returns the total number of names assigned.
static sa_sint_t libsais_renumber_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t name = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// 16-aligned stripes per thread; last thread takes the remainder.
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
if (omp_num_threads == 1)
{
name = libsais_renumber_lms_suffixes_8u(SA, m, 0, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Pass 1: count sign-marked entries per stripe.
thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Pass 2: prefix-sum the counts to get this thread's starting name,
// then renumber the stripe. The last thread knows the grand total.
fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
if (omp_thread_num == omp_num_threads - 1)
{
name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
}
libsais_renumber_lms_suffixes_8u(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
}
}
#endif
}
return name;
}
// Parallel wrapper for gathering the marked LMS entries from the upper
// half of SA (n >> 1 slots starting at SA[m]) toward the tail of SA
// (ending at n + fs). In the parallel path every thread except the last
// gathers its stripe into the stripe's own tail; the last thread gathers
// directly into the final tail position. The master then walks the threads
// from last to first, memmoving each run downward so all runs become one
// contiguous tail block ending at n + fs.
static void libsais_gather_marked_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Stripes cover the n >> 1 upper-half slots, 16-aligned per thread.
fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
if (omp_num_threads == 1)
{
libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
if (omp_thread_num < omp_num_threads - 1)
{
// Gather into this stripe's own tail; record where the run starts
// and how many entries it holds.
thread_state[omp_thread_num].state.position = libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)m + omp_block_start + omp_block_size, omp_block_start, omp_block_size);
thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size - thread_state[omp_thread_num].state.position;
}
else
{
// Last thread gathers straight into the final tail (n + fs).
thread_state[omp_thread_num].state.position = libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size);
thread_state[omp_thread_num].state.count = (fast_sint_t)n + (fast_sint_t)fs - thread_state[omp_thread_num].state.position;
}
}
#pragma omp barrier
#pragma omp master
{
// Pack runs downward from the end; the last thread's run is already
// in its final place.
fast_sint_t t, position = (fast_sint_t)n + (fast_sint_t)fs;
for (t = omp_num_threads - 1; t >= 0; --t)
{
position -= thread_state[t].state.count;
if (t != omp_num_threads - 1 && thread_state[t].state.count > 0)
{
memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t));
}
}
}
}
#endif
}
}
// Renumbers the m LMS suffixes and prepares the reduced problem: zeroes
// the upper half of SA, assigns names, then — if some names repeat
// (name < m) — gathers the marked name entries to the tail of SA for the
// recursive step; if all names are distinct, just strips the sign-bit
// markers from SA[0..m). Returns the number of distinct names.
static sa_sint_t libsais_renumber_and_gather_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t));
sa_sint_t name = libsais_renumber_lms_suffixes_8u_omp(SA, m, threads, thread_state);
if (name < m)
{
libsais_gather_marked_lms_suffixes_8u_omp(SA, n, m, fs, threads, thread_state);
}
else
{
// All suffixes already distinct: only the marker bits need clearing.
fast_sint_t i; for (i = 0; i < m; i += 1) { SA[i] &= SAINT_MAX; }
}
return name;
}
// Renumbers LMS suffixes for the 4k path while tracking distinctness: each
// entry's sign bit is stripped in place, and the name written to the upper
// half keeps the SAINT_MIN marker only when BOTH the current entry and its
// predecessor were sign-marked (name | (p & prev & SAINT_MIN)). The name
// advances after each sign-marked entry. p3 carries the predecessor across
// unrolled iterations and is seeded with 0 (treated as unmarked) for the
// first entry of the block. Returns the name counter after the block.
static sa_sint_t libsais_renumber_distinct_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0;
// Main loop: unrolled x4; each statement both rewrites SA[i + x] without
// its sign bit and scatters the (possibly marked) name to the upper half.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]);
libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]);
p0 = SA[i + 0]; SAm[(SA[i + 0] = p0 & SAINT_MAX) >> 1] = name | (p0 & p3 & SAINT_MIN); name += p0 < 0;
p1 = SA[i + 1]; SAm[(SA[i + 1] = p1 & SAINT_MAX) >> 1] = name | (p1 & p0 & SAINT_MIN); name += p1 < 0;
p2 = SA[i + 2]; SAm[(SA[i + 2] = p2 & SAINT_MAX) >> 1] = name | (p2 & p1 & SAINT_MIN); name += p2 < 0;
p3 = SA[i + 3]; SAm[(SA[i + 3] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0;
}
// Scalar tail: p2/p3 keep the predecessor chain intact.
for (j += prefetch_distance + 3; i < j; i += 1)
{
p2 = p3; p3 = SA[i]; SAm[(SA[i] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0;
}
return name;
}
// Post-pass over the name array in the upper half of SA (starting at
// SA[m]): an entry keeps its sign-bit marker only when the last preceding
// non-zero value was also negative — p & (prev | SAINT_MAX) clears the
// sign bit whenever prev >= 0. Zero (empty) entries are transparent: the
// carry (p0..p3) propagates the last non-zero value across them. p3 seeds
// the chain with 0 at the start of the block.
static void libsais_mark_distinct_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0;
// Main loop: unrolled x4 with write prefetch.
for (i = (fast_sint_t)m + omp_block_start, j = (fast_sint_t)m + omp_block_start + omp_block_size - 3; i < j; i += 4)
{
libsais_prefetchw(&SA[i + prefetch_distance]);
p0 = SA[i + 0]; SA[i + 0] = p0 & (p3 | SAINT_MAX); p0 = (p0 == 0) ? p3 : p0;
p1 = SA[i + 1]; SA[i + 1] = p1 & (p0 | SAINT_MAX); p1 = (p1 == 0) ? p0 : p1;
p2 = SA[i + 2]; SA[i + 2] = p2 & (p1 | SAINT_MAX); p2 = (p2 == 0) ? p1 : p2;
p3 = SA[i + 3]; SA[i + 3] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3;
}
// Scalar tail continuing the same carry chain.
for (j += 3; i < j; i += 1)
{
p2 = p3; p3 = SA[i]; SA[i] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3;
}
}
// Clamps the values stored in the upper half of SA (starting at SA[m]):
// entries that do not carry the sign-bit marker are zeroed, and the sign
// bit is stripped from those that do — i.e. only marked entries keep
// their payload.
static void libsais_clamp_lms_suffixes_length_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    sa_sint_t * RESTRICT values = &SA[m];

    fast_sint_t pos = omp_block_start;
    fast_sint_t limit = omp_block_start + omp_block_size - 3;

    /* Unrolled x4 main pass with write prefetch. */
    for (; pos < limit; pos += 4)
    {
        libsais_prefetchw(&values[pos + prefetch_distance]);

        sa_sint_t v0 = values[pos + 0]; values[pos + 0] = (v0 < 0 ? v0 : 0) & SAINT_MAX;
        sa_sint_t v1 = values[pos + 1]; values[pos + 1] = (v1 < 0 ? v1 : 0) & SAINT_MAX;
        sa_sint_t v2 = values[pos + 2]; values[pos + 2] = (v2 < 0 ? v2 : 0) & SAINT_MAX;
        sa_sint_t v3 = values[pos + 3]; values[pos + 3] = (v3 < 0 ? v3 : 0) & SAINT_MAX;
    }

    /* Scalar tail: at most three remaining entries. */
    for (limit += 3; pos < limit; pos += 1)
    {
        sa_sint_t v = values[pos]; values[pos] = (v < 0 ? v : 0) & SAINT_MAX;
    }
}
// Parallel driver for renumbering distinct LMS suffixes (4k variant).
// The m entries are split across threads in multiples of 16; each thread
// renumbers its block starting from 1 plus the count of marked (negative)
// suffixes in all preceding blocks. Returns the final name counter minus one.
static sa_sint_t libsais_renumber_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
sa_sint_t name = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Block stride is rounded down to a multiple of 16; the last thread absorbs the remainder.
fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
if (omp_num_threads == 1)
{
// Single-thread path: renumber the whole range with starting name 1.
name = libsais_renumber_distinct_lms_suffixes_32s_4k(SA, m, 1, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: each thread counts the marked (negative) suffixes in its block.
thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size);
}
#pragma omp barrier
{
// Phase 2: exclusive prefix sum of the per-thread counts gives this
// thread's starting name (names begin at 1).
fast_sint_t t, count = 1; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
if (omp_thread_num == omp_num_threads - 1)
{
// Only the last thread knows the grand total; it publishes the final counter.
name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
}
libsais_renumber_distinct_lms_suffixes_32s_4k(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
}
}
#endif
}
return name - 1;
}
// Parallel wrapper: splits the n/2-entry name area into per-thread blocks
// (multiples of 16; last thread takes the remainder) and runs the sequential
// marking pass on each block.
static void libsais_mark_distinct_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t thread_idx = omp_get_thread_num();
        fast_sint_t thread_count = omp_get_num_threads();
        fast_sint_t stride = (((fast_sint_t)n >> 1) / thread_count) & (-16);
        fast_sint_t start = thread_idx * stride;
        fast_sint_t size;
        if (thread_idx < thread_count - 1) { size = stride; }
        else { size = ((fast_sint_t)n >> 1) - start; }
#else
        UNUSED(threads);
        fast_sint_t start = 0;
        fast_sint_t size = (fast_sint_t)n >> 1;
#endif
        libsais_mark_distinct_lms_suffixes_32s(SA, m, start, size);
    }
}
// Parallel wrapper: splits the n/2-entry length area into per-thread blocks
// (multiples of 16; last thread takes the remainder) and runs the sequential
// clamping pass on each block.
static void libsais_clamp_lms_suffixes_length_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t thread_idx = omp_get_thread_num();
        fast_sint_t thread_count = omp_get_num_threads();
        fast_sint_t stride = (((fast_sint_t)n >> 1) / thread_count) & (-16);
        fast_sint_t start = thread_idx * stride;
        fast_sint_t size;
        if (thread_idx < thread_count - 1) { size = stride; }
        else { size = ((fast_sint_t)n >> 1) - start; }
#else
        UNUSED(threads);
        fast_sint_t start = 0;
        fast_sint_t size = (fast_sint_t)n >> 1;
#endif
        libsais_clamp_lms_suffixes_length_32s(SA, m, start, size);
    }
}
// Renumbers the LMS suffixes and, when duplicates remain, runs the marking
// pass over the name area. Returns the number of distinct names.
static sa_sint_t libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    // Clear the name area SA[m .. m + n/2) before renumbering.
    memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t));

    sa_sint_t names = libsais_renumber_distinct_lms_suffixes_32s_4k_omp(SA, m, threads, thread_state);
    if (names >= m)
    {
        // Every LMS suffix received a unique name; no marking pass needed.
        return names;
    }

    libsais_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads);
    return names;
}
// Renumber-and-mark for the 1k (small alphabet) path. Three stages:
//   1) gather LMS positions, store each LMS-substring length (biased by
//      SAINT_MIN) into the name area at index position >> 1;
//   2) clamp the length area (parallel);
//   3) compare consecutive LMS substrings character-by-character and assign
//      names, with the sign bit recording whether a suffix differs from its
//      predecessor; finally mark distinct suffixes when duplicates remain.
// Returns the number of distinct names.
static sa_sint_t libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
const fast_sint_t prefetch_distance = 32;
sa_sint_t * RESTRICT SAm = &SA[m];
{
libsais_gather_lms_suffixes_32s(T, SA, n);
// Zero the portion of the name area not overwritten by the gathered suffixes.
memset(&SA[m], 0, ((size_t)n - (size_t)m - (size_t)m) * sizeof(sa_sint_t));
fast_sint_t i, j;
// Store substring length SA[i+1] - SA[i] + 1 (with SAINT_MIN bias) at slot SA[i] >> 1.
for (i = (fast_sint_t)n - (fast_sint_t)m, j = (fast_sint_t)n - 1 - prefetch_distance - 3; i < j; i += 4)
{
libsais_prefetch(&SA[i + 2 * prefetch_distance]);
libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]);
libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]);
libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]);
libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]);
SAm[((sa_uint_t)SA[i + 0]) >> 1] = SA[i + 1] - SA[i + 0] + 1 + SAINT_MIN;
SAm[((sa_uint_t)SA[i + 1]) >> 1] = SA[i + 2] - SA[i + 1] + 1 + SAINT_MIN;
SAm[((sa_uint_t)SA[i + 2]) >> 1] = SA[i + 3] - SA[i + 2] + 1 + SAINT_MIN;
SAm[((sa_uint_t)SA[i + 3]) >> 1] = SA[i + 4] - SA[i + 3] + 1 + SAINT_MIN;
}
for (j += prefetch_distance + 3; i < j; i += 1)
{
SAm[((sa_uint_t)SA[i]) >> 1] = SA[i + 1] - SA[i] + 1 + SAINT_MIN;
}
// The last gathered suffix has no successor; give it length 1 (biased).
SAm[((sa_uint_t)SA[n - 1]) >> 1] = 1 + SAINT_MIN;
}
{
libsais_clamp_lms_suffixes_length_32s_omp(SA, n, m, threads);
}
sa_sint_t name = 1;
{
// Pairwise comparison of consecutive LMS substrings, two per iteration.
// pdiff/qdiff are SAINT_MIN when the substring differs from its predecessor,
// 0 when it compares equal over its full length.
fast_sint_t i, j, p = SA[0], plen = SAm[p >> 1]; sa_sint_t pdiff = SAINT_MIN;
for (i = 1, j = m - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetch(&SA[i + 2 * prefetch_distance]);
libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 0])]);
libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 1])]);
// Equal lengths are a precondition for substring equality; only then compare characters.
fast_sint_t q = SA[i + 0], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN;
if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < qlen); qdiff = (l - qlen) & SAINT_MIN; }
// Name slot keeps the sign bit only when both neighbors compared equal (pdiff & qdiff).
SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0);
p = SA[i + 1]; plen = SAm[p >> 1]; pdiff = SAINT_MIN;
if (qlen == plen) { fast_sint_t l = 0; do { if (T[q + l] != T[p + l]) { break; } } while (++l < plen); pdiff = (l - plen) & SAINT_MIN; }
SAm[q >> 1] = name | (qdiff & pdiff); name += (pdiff < 0);
}
for (j += prefetch_distance + 1; i < j; i += 1)
{
fast_sint_t q = SA[i], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN;
if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < plen); qdiff = (l - plen) & SAINT_MIN; }
SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0);
p = q; plen = qlen; pdiff = qdiff;
}
// Final suffix: no successor to compare against.
SAm[p >> 1] = name | pdiff; name++;
}
if (name <= m)
{
// Duplicates exist: propagate the distinct marks through the name area.
libsais_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads);
}
return name - 1;
}
// Maps each of the first m entries of SA through the lookup table stored at
// SA[n - m ..] (SA[i] = table[SA[i]]), restoring original LMS positions.
static void libsais_reconstruct_lms_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;

    const sa_sint_t * RESTRICT lookup = &SA[n - m];

    fast_sint_t pos = omp_block_start;
    fast_sint_t unrolled_end = omp_block_start + omp_block_size - prefetch_distance - 3;

    // Main loop: four entries per iteration, with the lookup targets prefetched
    // one prefetch_distance ahead.
    for (; pos < unrolled_end; pos += 4)
    {
        libsais_prefetchw(&SA[pos + 2 * prefetch_distance]);

        libsais_prefetch(&lookup[SA[pos + prefetch_distance + 0]]);
        libsais_prefetch(&lookup[SA[pos + prefetch_distance + 1]]);
        libsais_prefetch(&lookup[SA[pos + prefetch_distance + 2]]);
        libsais_prefetch(&lookup[SA[pos + prefetch_distance + 3]]);

        SA[pos + 0] = lookup[SA[pos + 0]];
        SA[pos + 1] = lookup[SA[pos + 1]];
        SA[pos + 2] = lookup[SA[pos + 2]];
        SA[pos + 3] = lookup[SA[pos + 3]];
    }

    // Scalar tail for the remaining entries.
    for (unrolled_end += prefetch_distance + 3; pos < unrolled_end; pos += 1)
    {
        SA[pos] = lookup[SA[pos]];
    }
}
// Parallel wrapper: splits the m entries into per-thread blocks (multiples of
// 16; last thread takes the remainder) and reconstructs each block.
static void libsais_reconstruct_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t thread_idx = omp_get_thread_num();
        fast_sint_t thread_count = omp_get_num_threads();
        fast_sint_t stride = (m / thread_count) & (-16);
        fast_sint_t start = thread_idx * stride;
        fast_sint_t size;
        if (thread_idx < thread_count - 1) { size = stride; }
        else { size = m - start; }
#else
        UNUSED(threads);
        fast_sint_t start = 0;
        fast_sint_t size = m;
#endif
        libsais_reconstruct_lms_suffixes(SA, n, m, start, size);
    }
}
// Moves the sorted LMS suffixes (packed at SA[.. m)) into place at the end of
// each symbol's bucket, walking symbols from high to low and zero-filling the
// gaps between consecutive destinations.
static void libsais_place_lms_suffixes_interval_8u(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE];

    fast_sint_t prev_start = n;
    fast_sint_t c;
    for (c = UCHAR_MAX - 1; c >= 0; --c)
    {
        // Number of LMS suffixes starting with symbol c.
        fast_sint_t count = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
        if (count <= 0) { continue; }

        fast_sint_t dst_end = bucket_end[c];
        if (prev_start > dst_end)
        {
            // Clear the gap between this bucket's end and the previous placement.
            memset(&SA[dst_end], 0, (size_t)(prev_start - dst_end) * sizeof(sa_sint_t));
        }

        m -= (sa_sint_t)count;
        prev_start = dst_end - count;
        memmove(&SA[prev_start], &SA[m], (size_t)count * sizeof(sa_sint_t));
    }

    // Clear everything below the lowest placed block.
    memset(&SA[0], 0, (size_t)prev_start * sizeof(sa_sint_t));
}
// 32-bit/4k variant: moves the sorted LMS suffixes (packed at SA[.. m)) to the
// end of each symbol's bucket, high symbol to low, zero-filling the gaps.
static void libsais_place_lms_suffixes_interval_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];

    fast_sint_t prev_start = n;
    fast_sint_t c;
    for (c = (fast_sint_t)k - 2; c >= 0; --c)
    {
        // Number of LMS suffixes starting with symbol c.
        fast_sint_t count = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
        if (count <= 0) { continue; }

        fast_sint_t dst_end = bucket_end[c];
        if (prev_start > dst_end)
        {
            // Clear the gap between this bucket's end and the previous placement.
            memset(&SA[dst_end], 0, (size_t)(prev_start - dst_end) * sizeof(sa_sint_t));
        }

        m -= (sa_sint_t)count;
        prev_start = dst_end - count;
        memmove(&SA[prev_start], &SA[m], (size_t)count * sizeof(sa_sint_t));
    }

    // Clear everything below the lowest placed block.
    memset(&SA[0], 0, (size_t)prev_start * sizeof(sa_sint_t));
}
// 32-bit/2k variant: iterates bucket slots directly (step BUCKETS_INDEX2(1, 0))
// from the highest symbol down, moving each symbol's block of sorted LMS
// suffixes into place and zero-filling the gaps.
static void libsais_place_lms_suffixes_interval_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    fast_sint_t prev_start = n;
    fast_sint_t c;
    for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0))
    {
        // Number of LMS suffixes in this bucket slot.
        fast_sint_t count = (fast_sint_t)buckets[c + BUCKETS_INDEX2(1, 1)] - (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)];
        if (count <= 0) { continue; }

        fast_sint_t dst_end = buckets[c];
        if (prev_start > dst_end)
        {
            // Clear the gap between this bucket's end and the previous placement.
            memset(&SA[dst_end], 0, (size_t)(prev_start - dst_end) * sizeof(sa_sint_t));
        }

        m -= (sa_sint_t)count;
        prev_start = dst_end - count;
        memmove(&SA[prev_start], &SA[m], (size_t)count * sizeof(sa_sint_t));
    }

    // Clear everything below the lowest placed block.
    memset(&SA[0], 0, (size_t)prev_start * sizeof(sa_sint_t));
}
// 32-bit/1k variant: walks the m LMS suffixes from last to first, placing each
// at the current write cursor l of its first symbol's bucket. Whenever the
// symbol changes, the unused space of the new bucket (between buckets[c] and
// the old cursor) is zeroed before switching. buckets[] holds bucket start
// offsets and is read per symbol; c tracks the current symbol.
static void libsais_place_lms_suffixes_interval_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t m, sa_sint_t * RESTRICT buckets)
{
const fast_sint_t prefetch_distance = 32;
// Start in the highest bucket; l is the current downward write cursor.
sa_sint_t c = k - 1; fast_sint_t i, l = buckets[c];
// Unrolled 4x with prefetch of both SA and the symbols T[SA[..]].
for (i = (fast_sint_t)m - 1; i >= prefetch_distance + 3; i -= 4)
{
libsais_prefetch(&SA[i - 2 * prefetch_distance]);
libsais_prefetch(&T[SA[i - prefetch_distance - 0]]);
libsais_prefetch(&T[SA[i - prefetch_distance - 1]]);
libsais_prefetch(&T[SA[i - prefetch_distance - 2]]);
libsais_prefetch(&T[SA[i - prefetch_distance - 3]]);
// On a symbol change: zero the gap [buckets[c], l) of the new bucket, reset cursor.
sa_sint_t p0 = SA[i - 0]; if (T[p0] != c) { c = T[p0]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p0;
sa_sint_t p1 = SA[i - 1]; if (T[p1] != c) { c = T[p1]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p1;
sa_sint_t p2 = SA[i - 2]; if (T[p2] != c) { c = T[p2]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p2;
sa_sint_t p3 = SA[i - 3]; if (T[p3] != c) { c = T[p3]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p3;
}
// Scalar tail for the first few entries.
for (; i >= 0; i -= 1)
{
sa_sint_t p = SA[i]; if (T[p] != c) { c = T[p]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p;
}
// Clear everything below the last write cursor.
memset(&SA[0], 0, (size_t)l * sizeof(sa_sint_t));
}
// 32-bit/6k variant: per-symbol LMS counts come directly from the histogram
// (buckets[BUCKETS_INDEX4(c, 1)]). Moves each symbol's block of sorted LMS
// suffixes to the end of its bucket, high symbol to low, zero-filling gaps.
static void libsais_place_lms_suffixes_histogram_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[5 * k];

    fast_sint_t prev_start = n;
    fast_sint_t c;
    for (c = (fast_sint_t)k - 2; c >= 0; --c)
    {
        fast_sint_t count = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 1)];
        if (count <= 0) { continue; }

        fast_sint_t dst_end = bucket_end[c];
        if (prev_start > dst_end)
        {
            // Clear the gap between this bucket's end and the previous placement.
            memset(&SA[dst_end], 0, (size_t)(prev_start - dst_end) * sizeof(sa_sint_t));
        }

        m -= (sa_sint_t)count;
        prev_start = dst_end - count;
        memmove(&SA[prev_start], &SA[m], (size_t)count * sizeof(sa_sint_t));
    }

    // Clear everything below the lowest placed block.
    memset(&SA[0], 0, (size_t)prev_start * sizeof(sa_sint_t));
}
// 32-bit/4k variant: per-symbol LMS counts come directly from the histogram
// (buckets[BUCKETS_INDEX2(c, 1)]). Moves each symbol's block of sorted LMS
// suffixes to the end of its bucket, high symbol to low, zero-filling gaps.
static void libsais_place_lms_suffixes_histogram_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k];

    fast_sint_t prev_start = n;
    fast_sint_t c;
    for (c = (fast_sint_t)k - 2; c >= 0; --c)
    {
        fast_sint_t count = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)];
        if (count <= 0) { continue; }

        fast_sint_t dst_end = bucket_end[c];
        if (prev_start > dst_end)
        {
            // Clear the gap between this bucket's end and the previous placement.
            memset(&SA[dst_end], 0, (size_t)(prev_start - dst_end) * sizeof(sa_sint_t));
        }

        m -= (sa_sint_t)count;
        prev_start = dst_end - count;
        memmove(&SA[prev_start], &SA[m], (size_t)count * sizeof(sa_sint_t));
    }

    // Clear everything below the lowest placed block.
    memset(&SA[0], 0, (size_t)prev_start * sizeof(sa_sint_t));
}
// 32-bit/2k variant: iterates bucket slots directly (step BUCKETS_INDEX2(1, 0))
// from the highest symbol down; the LMS count is read from the histogram slot
// buckets[c + BUCKETS_INDEX2(0, 1)]. Zero-fills the gaps between placements.
static void libsais_place_lms_suffixes_histogram_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets)
{
    fast_sint_t prev_start = n;
    fast_sint_t c;
    for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0))
    {
        fast_sint_t count = (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)];
        if (count <= 0) { continue; }

        fast_sint_t dst_end = buckets[c];
        if (prev_start > dst_end)
        {
            // Clear the gap between this bucket's end and the previous placement.
            memset(&SA[dst_end], 0, (size_t)(prev_start - dst_end) * sizeof(sa_sint_t));
        }

        m -= (sa_sint_t)count;
        prev_start = dst_end - count;
        memmove(&SA[prev_start], &SA[m], (size_t)count * sizeof(sa_sint_t));
    }

    // Clear everything below the lowest placed block.
    memset(&SA[0], 0, (size_t)prev_start * sizeof(sa_sint_t));
}
// Final left-to-right induction pass (BWT output, 8-bit text). For each SA
// entry p > 0: the entry itself is rewritten to the preceding symbol T[p-1]
// tagged with SAINT_MIN (BWT material), and p-1 is appended to the bucket of
// T[p-1] with the sign bit set when T[p-2] < T[p-1] (suffix type hint consumed
// by later passes). Entries <= 0 only have their sign bit cleared.
static void libsais_final_bwt_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Unrolled 2x; the NULL-guarded prefetches avoid touching T[-1]/T[-2] for p <= 0.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
// T[p - (p > 0)] guards the p == 0 case after the decrement (reads T[0] instead of T[-1]).
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
// Like libsais_final_bwt_scan_left_to_right_8u, but additionally samples the
// auxiliary index array I: whenever the induced position p is a multiple of
// rm + 1 (checked as (p & rm) == 0, so rm + 1 is presumably a power of two —
// NOTE(review): confirm against callers), the post-increment bucket cursor is
// recorded at I[p / (rm + 1)].
static void libsais_final_bwt_aux_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Unrolled 2x; NULL-guarded prefetches avoid touching T[-1]/T[-2] for p <= 0.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]]; }}
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]]; }}
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } }
}
}
// Final left-to-right induction pass (plain suffix-array output, 8-bit text).
// Each entry has its sign bit flipped in place (p ^ SAINT_MIN); for p > 0 the
// position p-1 is appended to the bucket of T[p-1], with the sign bit encoding
// T[p-2] < T[p-1] for consumption by later passes.
static void libsais_final_sorting_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Unrolled 2x; NULL-guarded prefetches avoid touching T[-1]/T[-2] for p <= 0.
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
// T[p - (p > 0)] guards the p == 0 case after the decrement.
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
// 32-bit-text variant of the final left-to-right induction pass. Same logic as
// the 8u version, but with a two-stage prefetch pipeline: at distance 2d the
// text positions are prefetched, at distance 1d the bucket slots (which require
// the text value) are prefetched.
static void libsais_final_sorting_scan_left_to_right_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 3 * prefetch_distance]);
// Stage 1 (distance 2d): prefetch T near the upcoming suffix positions.
sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
// Stage 2 (distance 1d): prefetch the destination bucket slot and T[p-2].
sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); }
// T[p - (p > 0)] guards the p == 0 case after the decrement.
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
// Scalar tail.
for (j += 2 * prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
// Per-thread "prepare" phase of the parallel BWT left-to-right pass: performs
// the same in-place rewrite as libsais_final_bwt_scan_left_to_right_8u, but
// instead of writing the induced entries into the shared buckets, it records
// (symbol, index) pairs into the thread-local cache and tallies a thread-local
// histogram in buckets. Returns the number of cached entries.
static fast_sint_t libsais_final_bwt_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Thread-local histogram starts from zero each call.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
return count;
}
// Per-thread "prepare" phase of the parallel sorting left-to-right pass:
// mirrors libsais_final_sorting_scan_left_to_right_8u (SA[i] ^= SAINT_MIN, no
// BWT symbol stored), but records induced (symbol, index) pairs into the
// thread-local cache and tallies a thread-local histogram in buckets.
// Returns the number of cached entries.
static fast_sint_t libsais_final_sorting_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Thread-local histogram starts from zero each call.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); }
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
return count;
}
// Per-thread "place" phase: scatters the cached (symbol, index) pairs produced
// by the prepare phase into SA, advancing this thread's bucket cursors.
static void libsais_final_order_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
    const fast_sint_t prefetch_distance = 32;

    fast_sint_t pos = 0;
    fast_sint_t unrolled_end = count - 3;

    // Main loop handles four cached entries per iteration.
    for (; pos < unrolled_end; pos += 4)
    {
        libsais_prefetch(&cache[pos + prefetch_distance]);

        SA[buckets[cache[pos + 0].symbol]++] = cache[pos + 0].index;
        SA[buckets[cache[pos + 1].symbol]++] = cache[pos + 1].index;
        SA[buckets[cache[pos + 2].symbol]++] = cache[pos + 2].index;
        SA[buckets[cache[pos + 3].symbol]++] = cache[pos + 3].index;
    }

    // Scalar tail for the remaining (at most 3) entries.
    for (unrolled_end += 3; pos < unrolled_end; pos += 1)
    {
        SA[buckets[cache[pos].symbol]++] = cache[pos].index;
    }
}
// Per-thread "place" phase for the BWT-with-auxiliary-index pass: scatters the
// cached (symbol, index) pairs into SA and, for every index that is a multiple
// of rm + 1 (masked test, sign bit stripped via SAINT_MAX before dividing),
// records the post-increment bucket cursor in I.
static void libsais_final_bwt_aux_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// Unrolled 4x over the cached entries.
for (i = 0, j = count - 3; i < j; i += 4)
{
libsais_prefetch(&cache[i + prefetch_distance]);
SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; if ((cache[i + 0].index & rm) == 0) { I[(cache[i + 0].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 0].symbol]; }
SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 1].symbol]; }
SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; if ((cache[i + 2].index & rm) == 0) { I[(cache[i + 2].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 2].symbol]; }
SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; if ((cache[i + 3].index & rm) == 0) { I[(cache[i + 3].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 3].symbol]; }
}
// Scalar tail for the remaining (at most 3) entries.
for (j += 3; i < j; i += 1)
{
SA[buckets[cache[i].symbol]++] = cache[i].index; if ((cache[i].index & rm) == 0) { I[(cache[i].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol]; }
}
}
// Gather phase of the blocked 32-bit left-to-right pass: for each SA entry,
// flips its sign bit in place and stores into cache[i] the induced entry
// (index with type bit) plus its leading symbol. Entries that induce nothing
// get symbol = SAINT_MIN so the sort phase can skip them.
static void libsais_final_sorting_scan_left_to_right_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
libsais_prefetchw(&cache[i + prefetch_distance]);
// symbol stays SAINT_MIN when p <= 0 (nothing induced from this entry).
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1;
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol;
}
}
// Sort phase of the blocked 32-bit left-to-right pass: allocates each cached
// entry its destination slot via the shared bucket cursors. cache[i].symbol is
// overwritten with the allocated slot; when that slot falls inside this same
// block (< omp_block_end), the induced successor is chained directly back into
// the cache at that slot so it is processed within this call.
static void libsais_final_sorting_scan_left_to_right_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size;
for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&cache[i + 2 * prefetch_distance]);
// Prefetch the bucket slots of upcoming entries; skip placeholders (symbol < 0).
sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i + 0].symbol;
if (v0 >= 0)
{
// Claim the next slot in this symbol's bucket.
cache[i + 0].symbol = induction_bucket[v0]++;
// If the claimed slot lies in this block, induce its successor in place.
if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; cache[i + 0].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
sa_sint_t v1 = cache[i + 1].symbol;
if (v1 >= 0)
{
cache[i + 1].symbol = induction_bucket[v1]++;
if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; cache[i + 1].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
// Scalar tail.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = induction_bucket[v]++;
if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
}
// Parallel driver for one block of the BWT left-to-right pass.
// Single thread: run the sequential scan directly. Multiple threads:
// (1) each thread caches its induced entries + a local histogram,
// (2) the master converts the shared bucket cursors into per-thread base
//     offsets (prefix sums over the local histograms), (3) each thread
// scatters its cache into SA. Barriers separate the three phases.
static void libsais_final_bwt_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block in multiples of 16; last thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_bwt_scan_left_to_right_8u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: cache induced entries and build a thread-local histogram.
thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2: advance the shared cursors by each thread's histogram and
// leave each thread's starting offsets in its local bucket array.
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3: scatter the cached entries using the per-thread offsets.
libsais_final_order_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
// Parallel driver for one block of the BWT-with-auxiliary-index left-to-right
// pass. Same three-phase structure as the plain BWT driver (prepare caches /
// master merges histograms into per-thread offsets / place), except the place
// phase also samples the auxiliary index array I.
static void libsais_final_bwt_aux_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block in multiples of 16; last thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_bwt_aux_scan_left_to_right_8u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: the prepare step is shared with the plain BWT driver; the
// I[] sampling happens in the aux place step below.
thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2: advance shared cursors, leave per-thread base offsets.
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3: scatter cached entries and sample I[].
libsais_final_bwt_aux_scan_left_to_right_8u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
// Parallel driver for one block of the plain sorting left-to-right pass.
// Same three-phase structure as the BWT driver: per-thread prepare into
// caches, master merges histograms into per-thread base offsets, then each
// thread places its cached entries.
static void libsais_final_sorting_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block in multiples of 16; last thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_sorting_scan_left_to_right_8u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1: cache induced entries and build a thread-local histogram.
thread_state[omp_thread_num].state.count = libsais_final_sorting_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2: advance shared cursors, leave per-thread base offsets.
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3: scatter the cached entries using the per-thread offsets.
libsais_final_order_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
//------------------------------------------------------------------------------
// Final left-to-right sorting scan over one block of SA for 32-bit symbols.
// Multi-threaded path: parallel gather into the shared cache, a serial
// in-cache induction pass by the master thread, then parallel write-back.
// Passing `cache - block_start` lets every phase index the cache by absolute
// SA position.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_left_to_right_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block size, rounded down to a multiple of 16; the last
// thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_sorting_scan_left_to_right_32s(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (parallel): gather this thread's slice into the shared cache.
libsais_final_sorting_scan_left_to_right_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (serial): induce over the whole cached block.
libsais_final_sorting_scan_left_to_right_32s_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3 (parallel): compact and write cached suffixes back into SA.
libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
//------------------------------------------------------------------------------
// Driver for the final left-to-right BWT induction scan (8-bit input).
// Seeds the scan by placing suffix n-1 into its bucket (the sign bit records
// whether T[n-2] < T[n-1]), then either scans serially (one thread or small
// input) or walks SA left to right carving out runs of non-zero entries that
// are induced in parallel.
//------------------------------------------------------------------------------
static void libsais_final_bwt_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais_final_bwt_scan_left_to_right_8u(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < n; )
{
if (SA[block_start] == 0)
{
// Zero entries produce no induction work (the p > 0 guard below skips
// them); advance past them one at a time.
block_start++;
}
else
{
// Bound the run by what the per-thread caches can absorb, then extend it
// up to the next zero entry.
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
// Short runs: induce inline.  SA[i] is replaced by the BWT character
// T[p-1] (flagged with SAINT_MIN), and position p-1 is pushed into its
// bucket with the comparison flag in the sign bit.
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
else
{
// Long runs: process in parallel via the block helper.
libsais_final_bwt_scan_left_to_right_8u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
//------------------------------------------------------------------------------
// Driver for the final left-to-right BWT induction scan that additionally
// maintains the auxiliary index array I: every position p with (p & rm) == 0
// records the current bucket position at slot p / (rm + 1) (rm acts as a
// sampling-rate-minus-one mask).  Otherwise identical in structure to
// libsais_final_bwt_scan_left_to_right_8u_omp.
//------------------------------------------------------------------------------
static void libsais_final_bwt_aux_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
// Seed with suffix n-1 and record its sample into I if aligned.
SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1));
if ((((sa_sint_t)n - 1) & rm) == 0) { I[((sa_sint_t)n - 1) / (rm + 1)] = induction_bucket[T[(sa_sint_t)n - 1]]; }
if (threads == 1 || n < 65536)
{
libsais_final_bwt_aux_scan_left_to_right_8u(T, SA, rm, I, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < n; )
{
if (SA[block_start] == 0)
{
// Zero entries produce no induction work; step over them.
block_start++;
}
else
{
// Bound the run by the per-thread cache capacity, extend to next zero.
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
// Short runs: induce inline, sampling the auxiliary index as we go.
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } }
}
}
else
{
// Long runs: parallel block helper.
libsais_final_bwt_aux_scan_left_to_right_8u_block_omp(T, SA, rm, I, induction_bucket, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
//------------------------------------------------------------------------------
// Driver for the final left-to-right sorting scan (8-bit input, plain suffix
// array output — no BWT characters are written back).  Same run-carving
// parallel strategy as the BWT drivers above it in the file.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
// Seed the scan with suffix n-1; the sign bit carries the T[n-2] < T[n-1]
// comparison flag.
SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais_final_sorting_scan_left_to_right_8u(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start;
for (block_start = 0; block_start < n; )
{
if (SA[block_start] == 0)
{
// Zero entries produce no induction work; step over them.
block_start++;
}
else
{
fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;}
fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; }
fast_sint_t block_size = block_end - block_start;
if (block_size < 32)
{
// Short runs: induce inline.  NOTE(review): the flag is cleared here
// with XOR (p ^ SAINT_MIN) while the BWT variants mask with
// p & SAINT_MAX — presumably matching the dedicated scan routine's
// flag convention; confirm against libsais_final_sorting_scan_left_to_right_8u.
for (; block_start < block_end; block_start += 1)
{
sa_sint_t p = SA[block_start]; SA[block_start] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); }
}
}
else
{
libsais_final_sorting_scan_left_to_right_8u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state);
block_start = block_end;
}
}
}
}
#else
UNUSED(thread_state);
#endif
}
//------------------------------------------------------------------------------
// Driver for the final left-to-right sorting scan over 32-bit symbols.
// Unlike the 8u drivers, the parallel path partitions SA into fixed-size
// blocks (threads * LIBSAIS_PER_THREAD_CACHE_SIZE entries) and reuses
// thread 0's cache as the shared scratch area for all workers.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_left_to_right_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
// Seed the scan with suffix n-1 (comparison flag in the sign bit).
SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1));
if (threads == 1 || n < 65536)
{
libsais_final_sorting_scan_left_to_right_32s(T, SA, induction_bucket, 0, n);
}
#if defined(_OPENMP)
else
{
fast_sint_t block_start, block_end;
for (block_start = 0; block_start < n; block_start = block_end)
{
block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; }
libsais_final_sorting_scan_left_to_right_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_start, block_end - block_start, threads);
}
}
#else
UNUSED(thread_state);
#endif
}
//------------------------------------------------------------------------------
// Serial right-to-left BWT induction over SA[omp_block_start, +omp_block_size).
// For each entry p > 0: SA[i] is overwritten with the BWT character T[p-1],
// and position p-1 is pushed into the bottom of its bucket — stored either as
// the plain position (c0 <= c1) or as c0 | SAINT_MIN.  Returns the index i at
// which a zero entry was seen, or -1 if none (used by the caller as the BWT
// primary index — NOTE(review): inferred from callers, confirm).
// Main loop is 2x unrolled with software prefetch of SA and of T at upcoming
// suffix positions.
//------------------------------------------------------------------------------
static sa_sint_t libsais_final_bwt_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j; sa_sint_t index = -1;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; index = (p0 == 0) ? (sa_sint_t)(i - 0) : index;
SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; }
sa_sint_t p1 = SA[i - 1]; index = (p1 == 0) ? (sa_sint_t)(i - 1) : index;
SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; }
}
// Tail: remaining entries, one at a time, without prefetch.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; index = (p == 0) ? (sa_sint_t)i : index;
SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; }
}
return index;
}
//------------------------------------------------------------------------------
// Serial right-to-left BWT induction with auxiliary-index sampling: in
// addition to the work done by libsais_final_bwt_scan_right_to_left_8u, every
// induced position p with (p & rm) == 0 stores the bucket position + 1 into
// I[p / (rm + 1)].  Does not track the zero entry (no return value).
//------------------------------------------------------------------------------
static void libsais_final_bwt_aux_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
// 2x-unrolled main loop with prefetch of SA and of T at upcoming positions.
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0];
SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]] + 1; } }
sa_sint_t p1 = SA[i - 1];
SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]] + 1; } }
}
// Tail: remaining entries, one at a time.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i];
SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } }
}
}
//------------------------------------------------------------------------------
// Serial right-to-left sorting induction (8-bit input, plain SA output).
// For each entry p > 0: the flag bit is cleared from SA[i] and position p-1
// is pushed into the bottom of bucket T[p-1], with the sign bit set when the
// preceding character is strictly larger.  2x unrolled with prefetch.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); }
}
// Tail: remaining entries, one at a time.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
}
//------------------------------------------------------------------------------
// Serial right-to-left sorting induction for 32-bit symbols.  Same per-entry
// operation as the 8u variant, but with a deeper two-stage prefetch pipeline:
// at distance 2d it touches T at upcoming suffix positions, at distance d it
// additionally prefetches the destination bucket slots for writing.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_right_to_left_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 3 * prefetch_distance]);
sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); }
sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); }
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); }
}
// Tail: remaining entries, one at a time.
for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
}
#if defined(_OPENMP)
//------------------------------------------------------------------------------
// Per-thread phase-1 helper for the parallel right-to-left BWT scan: performs
// the same per-entry transformation as the serial scan, but instead of
// writing induced entries into SA it records (symbol, flagged index) pairs
// into the thread-local cache and tallies per-symbol counts into buckets.
// Returns the number of cache entries produced.
//------------------------------------------------------------------------------
static fast_sint_t libsais_final_bwt_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Fresh per-block symbol counts.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p0 : t; }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p1 : t; }
}
// Tail: remaining entries, one at a time.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p : t; }
}
return count;
}
//------------------------------------------------------------------------------
// Phase-1 helper for the parallel right-to-left BWT scan with auxiliary
// index.  Like the non-aux prepare, but emits TWO cache slots per induced
// suffix: slot `count` holds the (possibly flagged) value to place, and slot
// `count + 1` holds the raw position p, needed later by the place phase to
// decide whether to sample into I.  Returns the number of slots used (2x the
// number of induced suffixes).
//------------------------------------------------------------------------------
static fast_sint_t libsais_final_bwt_aux_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Fresh per-block symbol counts.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p0 : t; cache[count + 1].index = p0; count += 2; }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p1 : t; cache[count + 1].index = p1; count += 2; }
}
// Tail: remaining entries, one at a time.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p : t; cache[count + 1].index = p; count += 2; }
}
return count;
}
//------------------------------------------------------------------------------
// Phase-1 helper for the parallel right-to-left sorting scan: records
// (symbol, flagged index) pairs into the thread-local cache with per-symbol
// counts, without touching the destination buckets in SA.  Returns the
// number of cache entries produced.
//------------------------------------------------------------------------------
static fast_sint_t libsais_final_sorting_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
// Fresh per-block symbol counts.
memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t));
fast_sint_t i, j, count = 0;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&SA[i - 2 * prefetch_distance]);
sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); }
sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); }
}
// Tail: remaining entries, one at a time.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
}
return count;
}
//------------------------------------------------------------------------------
// Phase-3 helper for the parallel right-to-left scans: drains the thread's
// cache, writing each cached value into SA at the next free slot of its
// symbol's bucket (buckets hold per-thread bucket ceilings and are
// decremented before each store).  The hot loop is 4x unrolled and prefetches
// the cache one stride ahead.
//------------------------------------------------------------------------------
static void libsais_final_order_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t pos;
// Unrolled body: handle groups of four while at least four entries remain.
for (pos = 0; pos + 3 < count; pos += 4)
{
libsais_prefetch(&cache[pos + prefetch_distance]);
SA[--buckets[cache[pos].symbol]] = cache[pos].index;
SA[--buckets[cache[pos + 1].symbol]] = cache[pos + 1].index;
SA[--buckets[cache[pos + 2].symbol]] = cache[pos + 2].index;
SA[--buckets[cache[pos + 3].symbol]] = cache[pos + 3].index;
}
// Place the up-to-three leftover entries.
for (; pos < count; pos += 1)
{
SA[--buckets[cache[pos].symbol]] = cache[pos].index;
}
}
//------------------------------------------------------------------------------
// Phase-3 helper for the parallel right-to-left BWT scan with auxiliary
// index.  Cache entries come in pairs (see the matching _block_prepare):
// even slot = flagged value to place, odd slot = raw position p.  Each pair
// places the value into its bucket and, when (p & rm) == 0, samples the
// bucket position + 1 into I[p / (rm + 1)].  4x pair-unrolled with prefetch.
// NOTE(review): the tail loop masks the raw position with SAINT_MAX before
// dividing while the unrolled loop does not — the prepare phase stores raw
// p >= 0, so the mask looks redundant rather than a behavioral difference;
// confirm.
//------------------------------------------------------------------------------
static void libsais_final_bwt_aux_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = 0, j = count - 6; i < j; i += 8)
{
libsais_prefetch(&cache[i + prefetch_distance]);
SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; if ((cache[i + 1].index & rm) == 0) { I[cache[i + 1].index / (rm + 1)] = buckets[cache[i + 0].symbol] + 1; }
SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; if ((cache[i + 3].index & rm) == 0) { I[cache[i + 3].index / (rm + 1)] = buckets[cache[i + 2].symbol] + 1; }
SA[--buckets[cache[i + 4].symbol]] = cache[i + 4].index; if ((cache[i + 5].index & rm) == 0) { I[cache[i + 5].index / (rm + 1)] = buckets[cache[i + 4].symbol] + 1; }
SA[--buckets[cache[i + 6].symbol]] = cache[i + 6].index; if ((cache[i + 7].index & rm) == 0) { I[cache[i + 7].index / (rm + 1)] = buckets[cache[i + 6].symbol] + 1; }
}
// Tail: remaining pairs, two slots at a time.
for (j += 6; i < j; i += 2)
{
SA[--buckets[cache[i].symbol]] = cache[i].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol] + 1; }
}
}
//------------------------------------------------------------------------------
// Phase-1 helper for the parallel right-to-left 32s sorting scan: for each
// SA position in the sub-block, records into the (absolute-indexed) cache
// the flagged predecessor index and its symbol, or SAINT_MIN as the symbol
// sentinel when the entry induces nothing (p <= 0).  2x unrolled, prefetches
// SA, T at upcoming positions, and the cache line being written.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_right_to_left_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2)
{
libsais_prefetchw(&SA[i + 2 * prefetch_distance]);
sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL);
sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL);
libsais_prefetchw(&cache[i + prefetch_distance]);
sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0;
sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1;
}
// Tail: remaining entries, one at a time.
for (j += prefetch_distance + 1; i < j; i += 1)
{
sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol;
}
}
//------------------------------------------------------------------------------
// Phase-2 helper (serial) for the parallel right-to-left 32s sorting scan:
// induces within the cached block.  For each entry with a non-negative
// symbol, the symbol field is overwritten with the destination SA position
// (--induction_bucket[symbol]).  When that destination falls inside the
// current block (>= omp_block_start), the induced entry is chained directly
// into the cache at that position, so later iterations pick it up without
// touching SA.  Entries whose symbol is negative (SAINT_MIN sentinel from the
// gather phase) are skipped.  2x unrolled with bucket-slot prefetch.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_right_to_left_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
const fast_sint_t prefetch_distance = 32;
fast_sint_t i, j;
for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2)
{
libsais_prefetchw(&cache[i - 2 * prefetch_distance]);
sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL);
sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL);
sa_sint_t v0 = cache[i - 0].symbol;
if (v0 >= 0)
{
// Claim the destination slot, then chain into the cache if it is local.
cache[i - 0].symbol = --induction_bucket[v0];
if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; cache[i - 0].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
sa_sint_t v1 = cache[i - 1].symbol;
if (v1 >= 0)
{
cache[i - 1].symbol = --induction_bucket[v1];
if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; cache[i - 1].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
// Tail: remaining entries, one at a time.
for (j -= prefetch_distance + 1; i >= j; i -= 1)
{
sa_sint_t v = cache[i].symbol;
if (v >= 0)
{
cache[i].symbol = --induction_bucket[v];
if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } }
}
}
}
//------------------------------------------------------------------------------
// Final right-to-left BWT scan over one block of SA, split across OpenMP
// threads.  Three barrier-separated phases: per-thread gather into a private
// cache with symbol counts, a serial back-to-front merge that turns the
// counts into per-thread bucket ceilings, then parallel placement.
//------------------------------------------------------------------------------
static void libsais_final_bwt_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block size, rounded down to a multiple of 16; the last
// thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_bwt_scan_right_to_left_8u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (parallel): gather this thread's entries into its cache.
thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (serial): merge threads back-to-front — temp_bucket[c] becomes
// this thread's bucket ceiling while induction_bucket[c] is lowered past
// this thread's entries.
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3 (parallel): place cached entries into SA.
libsais_final_order_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
//------------------------------------------------------------------------------
// Final right-to-left BWT scan with auxiliary-index sampling over one block
// of SA, split across OpenMP threads.  Same three-phase structure as the
// non-aux variant; the prepare/place helpers additionally carry raw
// positions so the place phase can sample into I.
//------------------------------------------------------------------------------
static void libsais_final_bwt_aux_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block size, rounded down to a multiple of 16; the last
// thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_bwt_aux_scan_right_to_left_8u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (parallel): gather entries (value + raw position pairs).
thread_state[omp_thread_num].state.count = libsais_final_bwt_aux_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (serial): back-to-front merge of per-thread bucket counts.
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3 (parallel): place entries and sample the auxiliary index.
libsais_final_bwt_aux_scan_right_to_left_8u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
//------------------------------------------------------------------------------
// Final right-to-left sorting scan over one block of SA (8-bit symbols),
// split across OpenMP threads.  Same three-phase gather / back-to-front
// bucket merge / place structure as the BWT block scan above it in the file,
// but using the sorting prepare helper (plain SA output).
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384 && omp_get_dynamic() == 0)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(thread_state);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block size, rounded down to a multiple of 16; the last
// thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_sorting_scan_right_to_left_8u(T, SA, induction_bucket, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (parallel): gather this thread's entries into its cache.
thread_state[omp_thread_num].state.count = libsais_final_sorting_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (serial): back-to-front merge of per-thread bucket counts.
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 0; --t)
{
sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets;
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; }
}
}
#pragma omp barrier
{
// Phase 3 (parallel): place cached entries into SA.
libsais_final_order_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count);
}
}
#endif
}
}
//------------------------------------------------------------------------------
// Final right-to-left sorting scan over one block of SA for 32-bit symbols.
// Parallel gather into the shared cache, a serial in-cache induction pass by
// the master thread, then parallel write-back.  `cache - block_start` makes
// the cache addressable by absolute SA position in every phase.
//------------------------------------------------------------------------------
static void libsais_final_sorting_scan_right_to_left_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads); UNUSED(cache);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Per-thread sub-block size, rounded down to a multiple of 16; the last
// thread absorbs the remainder.
fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start;
omp_block_start += block_start;
if (omp_num_threads == 1)
{
libsais_final_sorting_scan_right_to_left_32s(T, SA, buckets, omp_block_start, omp_block_size);
}
#if defined(_OPENMP)
else
{
{
// Phase 1 (parallel): gather this thread's slice into the shared cache.
libsais_final_sorting_scan_right_to_left_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size);
}
#pragma omp barrier
#pragma omp master
{
// Phase 2 (serial): induce over the whole cached block.
libsais_final_sorting_scan_right_to_left_32s_block_sort(T, buckets, cache - block_start, block_start, block_size);
}
#pragma omp barrier
{
// Phase 3 (parallel): compact and write cached suffixes back into SA.
libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size);
}
}
#endif
}
}
#endif
/* Right-to-left final induction pass for the 8-bit BWT variant.
 * Returns `index`, the SA position at which SA[.] == 0 was found
 * (presumably the BWT primary index; -1 if never seen — TODO confirm
 * against callers). Serial for one thread or small inputs; otherwise the
 * scan is chopped into blocks whose size is bounded by the per-thread
 * cache capacity, each block handled by the parallel block routine.
 * Blocks must end at a position where SA[.] == 0 (or at the cache bound)
 * because induced writes may target positions inside the current block. */
static sa_sint_t libsais_final_bwt_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t index = -1;
    if (threads == 1 || n < 65536)
    {
        index = libsais_final_bwt_scan_right_to_left_8u(T, SA, induction_bucket, 0, n);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start;
        for (block_start = (fast_sint_t)n - 1; block_start >= 0; )
        {
            if (SA[block_start] == 0)
            {
                /* Zero entry: record its position and step over it. */
                index = (sa_sint_t)block_start--;
            }
            else
            {
                /* Extend the block leftwards until a zero entry or the
                 * per-thread cache capacity limit is reached. */
                fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < 0) { block_max_end = -1; }
                fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
                fast_sint_t block_size = block_start - block_end;
                if (block_size < 32)
                {
                    /* Tiny block: induce inline (same body as the serial scan). */
                    for (; block_start > block_end; block_start -= 1)
                    {
                        sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; }
                    }
                }
                else
                {
                    libsais_final_bwt_scan_right_to_left_8u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state);
                    block_start = block_end;
                }
            }
        }
    }
#else
    UNUSED(thread_state);
#endif
    return index;
}
/* Right-to-left final induction for the 8-bit BWT variant with auxiliary
 * index sampling: whenever an induced text position p satisfies
 * (p & rm) == 0, an entry is recorded in I[p / (rm + 1)] (rm appears to
 * be a power-of-two sampling rate minus one — TODO confirm at call site).
 * Same blocking strategy as the non-aux variant, but with half the block
 * capacity since cached entries carry extra data. */
static void libsais_final_bwt_aux_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (threads == 1 || n < 65536)
    {
        libsais_final_bwt_aux_scan_right_to_left_8u(T, SA, rm, I, induction_bucket, 0, n);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start;
        for (block_start = (fast_sint_t)n - 1; block_start >= 0; )
        {
            if (SA[block_start] == 0)
            {
                block_start--;
            }
            else
            {
                /* Note the halved cache budget compared to the plain BWT scan. */
                fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * ((LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads) / 2); if (block_max_end < 0) { block_max_end = -1; }
                fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
                fast_sint_t block_size = block_start - block_end;
                if (block_size < 32)
                {
                    /* Tiny block: induce inline, including the aux-index sampling. */
                    for (; block_start > block_end; block_start -= 1)
                    {
                        sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } }
                    }
                }
                else
                {
                    libsais_final_bwt_aux_scan_right_to_left_8u_block_omp(T, SA, rm, I, induction_bucket, block_end + 1, block_size, threads, thread_state);
                    block_start = block_end;
                }
            }
        }
    }
#else
    UNUSED(thread_state);
#endif
}
/* Right-to-left final induction pass for the plain 8-bit suffix-array
 * variant (no BWT output). Same blocking strategy as the BWT scans:
 * blocks end at a zero SA entry or at the per-thread cache capacity. */
static void libsais_final_sorting_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (threads == 1 || n < 65536)
    {
        libsais_final_sorting_scan_right_to_left_8u(T, SA, induction_bucket, 0, n);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t block_start;
        for (block_start = (fast_sint_t)n - 1; block_start >= 0; )
        {
            if (SA[block_start] == 0)
            {
                block_start--;
            }
            else
            {
                /* NOTE(review): this clamp uses `< -1` where the sibling scans
                 * use `< 0`; the two are equivalent (clamping -1 to -1 is a
                 * no-op), so behavior matches the siblings. */
                fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < -1) { block_max_end = -1; }
                fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; }
                fast_sint_t block_size = block_start - block_end;
                if (block_size < 32)
                {
                    /* Tiny block: induce inline; top bit of the stored value
                     * encodes the T[p-1] > T[p] comparison. */
                    for (; block_start > block_end; block_start -= 1)
                    {
                        sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); }
                    }
                }
                else
                {
                    libsais_final_sorting_scan_right_to_left_8u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state);
                    block_start = block_end;
                }
            }
        }
    }
#else
    UNUSED(thread_state);
#endif
}
/* Right-to-left final induction driver for 32-bit alphabets: runs the
 * serial scan for one thread or small inputs, otherwise walks the array
 * from the end in fixed-capacity blocks and delegates each block to the
 * parallel block routine (which uses thread 0's shared cache). */
static void libsais_final_sorting_scan_right_to_left_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (threads == 1 || n < 65536)
    {
        libsais_final_sorting_scan_right_to_left_32s(T, SA, induction_bucket, 0, n);
    }
#if defined(_OPENMP)
    else
    {
        fast_sint_t scan_start = (fast_sint_t)n - 1;
        while (scan_start >= 0)
        {
            /* Block capacity is bounded by the combined per-thread caches;
             * clamp the left edge so it never runs past the array start. */
            fast_sint_t scan_end = scan_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE;
            if (scan_end < 0) { scan_end = -1; }
            libsais_final_sorting_scan_right_to_left_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, scan_end + 1, scan_start - scan_end, threads);
            scan_start = scan_end;
        }
    }
#else
    UNUSED(thread_state);
#endif
}
/* Zeroes the SA interval [bucket_start[c], bucket_end[c]) for every
 * symbol c in [0, k); intervals may be cleared in parallel since they
 * do not overlap. */
static void libsais_clear_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT bucket_start, sa_sint_t * RESTRICT bucket_end, sa_sint_t threads)
{
    fast_sint_t c;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536)
#else
    UNUSED(threads); UNUSED(n);
#endif
    for (c = 0; c < k; ++c)
    {
        fast_sint_t span = (fast_sint_t)bucket_end[c] - (fast_sint_t)bucket_start[c];
        if (span > 0)
        {
            memset(&SA[bucket_start[c]], 0, (size_t)span * sizeof(sa_sint_t));
        }
    }
}
/* Dispatches the final induction for 8-bit input to one of three modes:
 * plain suffix array (bwt == 0), BWT with auxiliary index sampling
 * (I != NULL), or plain BWT. Each mode runs the left-to-right pass over
 * the bucket table at offset 6*ALPHABET_SIZE, optionally clears the LMS
 * intervals in parallel, then runs the right-to-left pass over the table
 * at 7*ALPHABET_SIZE. Only the plain-BWT mode produces a meaningful
 * return value (the right-to-left scan's index); the others return 0. */
static sa_sint_t libsais_induce_final_order_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t * RESTRICT ltr_bucket = &buckets[6 * ALPHABET_SIZE];
    sa_sint_t * RESTRICT rtl_bucket = &buckets[7 * ALPHABET_SIZE];
    if (!bwt)
    {
        /* Plain suffix array construction. */
        libsais_final_sorting_scan_left_to_right_8u_omp(T, SA, n, ltr_bucket, threads, thread_state);
        if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, ltr_bucket, rtl_bucket, threads); }
        libsais_final_sorting_scan_right_to_left_8u_omp(T, SA, n, rtl_bucket, threads, thread_state);
        return 0;
    }
    if (I != NULL)
    {
        /* BWT with auxiliary index sampling at rate r (mask r - 1). */
        libsais_final_bwt_aux_scan_left_to_right_8u_omp(T, SA, n, r - 1, I, ltr_bucket, threads, thread_state);
        if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, ltr_bucket, rtl_bucket, threads); }
        libsais_final_bwt_aux_scan_right_to_left_8u_omp(T, SA, n, r - 1, I, rtl_bucket, threads, thread_state);
        return 0;
    }
    /* Plain BWT: the right-to-left scan reports the primary index. */
    libsais_final_bwt_scan_left_to_right_8u_omp(T, SA, n, ltr_bucket, threads, thread_state);
    if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, ltr_bucket, rtl_bucket, threads); }
    return libsais_final_bwt_scan_right_to_left_8u_omp(T, SA, n, rtl_bucket, threads, thread_state);
}
/* Final induction for the 6k-bucket layout: left-to-right pass uses the
 * bucket table at offset 4*k, right-to-left pass the table at 5*k. */
static void libsais_induce_final_order_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[4 * k], threads, thread_state);
    libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[5 * k], threads, thread_state);
}
/* Final induction for the 4k-bucket layout: left-to-right pass uses the
 * bucket table at offset 2*k, right-to-left pass the table at 3*k. */
static void libsais_induce_final_order_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[2 * k], threads, thread_state);
    libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[3 * k], threads, thread_state);
}
/* Final induction for the 2k-bucket layout: left-to-right pass uses the
 * bucket table at offset 1*k, right-to-left pass the table at offset 0. */
static void libsais_induce_final_order_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[1 * k], threads, thread_state);
    libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[0 * k], threads, thread_state);
}
/* Final induction for the minimal 1k-bucket layout: only one bucket table
 * is available, so symbol counts are recomputed and the table is
 * re-initialized (starts, then ends) between the two induction passes. */
static void libsais_induce_final_order_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_count_suffixes_32s(T, n, k, buckets);
    libsais_initialize_buckets_start_32s_1k(k, buckets);
    libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, buckets, threads, thread_state);
    libsais_count_suffixes_32s(T, n, k, buckets);
    libsais_initialize_buckets_end_32s_1k(k, buckets);
    libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, buckets, threads, thread_state);
}
/* Renumbers the LMS-substring names stored in the upper half of SA
 * (SAm = &SA[m]). A negative name marks a unique LMS substring: for
 * those, the corresponding text position T[p] is sign-marked and the
 * unique counter f advances; the name slot is rewritten relative to f.
 * `f` is the running count of unique suffixes seen before this slice;
 * returns the updated count. The first loop is a 4-way unrolled body
 * with software prefetching; the second handles the remainder. */
static sa_sint_t libsais_renumber_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t f, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    sa_sint_t * RESTRICT SAm = &SA[m];
    sa_sint_t i, j;
    for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 2 * (sa_sint_t)prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetch(&SA[i + 3 * prefetch_distance]);
        libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 0]) >> 1]);
        libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 1]) >> 1]);
        libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 2]) >> 1]);
        libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 3]) >> 1]);
        /* Prefetch T[q] for writing only when the name is negative (unique). */
        sa_uint_t q0 = (sa_uint_t)SA[i + prefetch_distance + 0]; const sa_sint_t * Tq0 = &T[q0]; libsais_prefetchw(SAm[q0 >> 1] < 0 ? Tq0 : NULL);
        sa_uint_t q1 = (sa_uint_t)SA[i + prefetch_distance + 1]; const sa_sint_t * Tq1 = &T[q1]; libsais_prefetchw(SAm[q1 >> 1] < 0 ? Tq1 : NULL);
        sa_uint_t q2 = (sa_uint_t)SA[i + prefetch_distance + 2]; const sa_sint_t * Tq2 = &T[q2]; libsais_prefetchw(SAm[q2 >> 1] < 0 ? Tq2 : NULL);
        sa_uint_t q3 = (sa_uint_t)SA[i + prefetch_distance + 3]; const sa_sint_t * Tq3 = &T[q3]; libsais_prefetchw(SAm[q3 >> 1] < 0 ? Tq3 : NULL);
        sa_uint_t p0 = (sa_uint_t)SA[i + 0]; sa_sint_t s0 = SAm[p0 >> 1]; if (s0 < 0) { T[p0] |= SAINT_MIN; f++; s0 = i + 0 + SAINT_MIN + f; } SAm[p0 >> 1] = s0 - f;
        sa_uint_t p1 = (sa_uint_t)SA[i + 1]; sa_sint_t s1 = SAm[p1 >> 1]; if (s1 < 0) { T[p1] |= SAINT_MIN; f++; s1 = i + 1 + SAINT_MIN + f; } SAm[p1 >> 1] = s1 - f;
        sa_uint_t p2 = (sa_uint_t)SA[i + 2]; sa_sint_t s2 = SAm[p2 >> 1]; if (s2 < 0) { T[p2] |= SAINT_MIN; f++; s2 = i + 2 + SAINT_MIN + f; } SAm[p2 >> 1] = s2 - f;
        sa_uint_t p3 = (sa_uint_t)SA[i + 3]; sa_sint_t s3 = SAm[p3 >> 1]; if (s3 < 0) { T[p3] |= SAINT_MIN; f++; s3 = i + 3 + SAINT_MIN + f; } SAm[p3 >> 1] = s3 - f;
    }
    /* Remainder loop (no unrolling or prefetch). */
    for (j += 2 * (sa_sint_t)prefetch_distance + 3; i < j; i += 1)
    {
        sa_uint_t p = (sa_uint_t)SA[i]; sa_sint_t s = SAm[p >> 1]; if (s < 0) { T[p] |= SAINT_MIN; f++; s = i + SAINT_MIN + f; } SAm[p >> 1] = s - f;
    }
    return f;
}
/* Scans SA[m + block] right-to-left, splitting entries into two
 * right-growing output streams within SA itself: sign-marked (negative)
 * entries go to the left cursor l with the sign stripped, and positive
 * entries go to the right cursor r decremented by one. Cursors are
 * passed and returned via *pl / *pr so per-thread slices can be chained.
 * Branch-free: both stores happen unconditionally, and each cursor only
 * advances when its predicate holds. */
static void libsais_compact_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t * pl, fast_sint_t * pr, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    sa_sint_t * RESTRICT SAl = &SA[0];
    sa_sint_t * RESTRICT SAr = &SA[0];
    fast_sint_t i, j, l = *pl - 1, r = *pr - 1;
    for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4)
    {
        libsais_prefetch(&SA[i - prefetch_distance]);
        sa_sint_t p0 = SA[i - 0]; SAl[l] = p0 & SAINT_MAX; l -= p0 < 0; SAr[r] = p0 - 1; r -= p0 > 0;
        sa_sint_t p1 = SA[i - 1]; SAl[l] = p1 & SAINT_MAX; l -= p1 < 0; SAr[r] = p1 - 1; r -= p1 > 0;
        sa_sint_t p2 = SA[i - 2]; SAl[l] = p2 & SAINT_MAX; l -= p2 < 0; SAr[r] = p2 - 1; r -= p2 > 0;
        sa_sint_t p3 = SA[i - 3]; SAl[l] = p3 & SAINT_MAX; l -= p3 < 0; SAr[r] = p3 - 1; r -= p3 > 0;
    }
    /* Remainder loop. */
    for (j -= 3; i >= j; i -= 1)
    {
        sa_sint_t p = SA[i]; SAl[l] = p & SAINT_MAX; l -= p < 0; SAr[r] = p - 1; r -= p > 0;
    }
    *pl = l + 1; *pr = r + 1;
}
#if defined(_OPENMP)
/* Counts how many entries of SA[omp_block_start .. +omp_block_size) point
 * at a negative (unique-marked) name slot in the upper half of SA
 * (SAm = &SA[m]). Used to compute per-thread prefix counts before the
 * parallel renumbering pass. Four independent accumulators let the
 * unrolled loop avoid a dependency chain. */
static sa_sint_t libsais_count_unique_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    sa_sint_t * RESTRICT SAm = &SA[m];
    fast_sint_t i, j; sa_sint_t f0 = 0, f1 = 0, f2 = 0, f3 = 0;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4)
    {
        libsais_prefetch(&SA[i + 2 * prefetch_distance]);
        libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]);
        libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]);
        libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]);
        libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]);
        f0 += SAm[((sa_uint_t)SA[i + 0]) >> 1] < 0;
        f1 += SAm[((sa_uint_t)SA[i + 1]) >> 1] < 0;
        f2 += SAm[((sa_uint_t)SA[i + 2]) >> 1] < 0;
        f3 += SAm[((sa_uint_t)SA[i + 3]) >> 1] < 0;
    }
    /* Remainder loop accumulates into f0 only. */
    for (j += prefetch_distance + 3; i < j; i += 1)
    {
        f0 += SAm[((sa_uint_t)SA[i]) >> 1] < 0;
    }
    return f0 + f1 + f2 + f3;
}
#endif
/* Parallel driver for the LMS renumbering pass. Single-threaded path
 * calls the serial kernel directly. Multi-threaded path first counts
 * unique suffixes per slice, then (after a barrier) each thread
 * renumbers its slice starting from the prefix sum of earlier slices'
 * counts; the last thread also produces the total `f` returned to the
 * caller. */
static sa_sint_t libsais_renumber_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t f = 0;
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        /* Equal slices rounded down to a multiple of 16; last thread takes
         * the remainder. */
        fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
        if (omp_num_threads == 1)
        {
            f = libsais_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, 0, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: count unique suffixes in this slice. */
                thread_state[omp_thread_num].state.count = libsais_count_unique_suffixes(SA, m, omp_block_start, omp_block_size);
            }
            #pragma omp barrier
            {
                /* Phase 2: renumber, offset by the counts of earlier slices. */
                fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
                if (omp_thread_num == omp_num_threads - 1)
                {
                    f = (sa_sint_t)(count + thread_state[omp_thread_num].state.count);
                }
                libsais_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, (sa_sint_t)count, omp_block_start, omp_block_size);
            }
        }
#endif
    }
    return f;
}
/* Parallel driver for the compaction pass. Single-threaded path runs the
 * serial kernel with the final output cursors. Multi-threaded path lets
 * each thread compact its slice into slice-local output regions (cursors
 * recorded in thread_state), then the master thread concatenates the
 * per-thread results right-to-left into the final two regions via
 * memcpy. The trailing memcpy moves the f unique entries to the very end
 * of the available space. */
static void libsais_compact_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072 && m < fs)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        /* Slices cover [0, n/2), rounded down to multiples of 16. */
        fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start;
        if (omp_num_threads == 1)
        {
            fast_sint_t l = m, r = (fast_sint_t)n + (fast_sint_t)fs;
            libsais_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &l, &r, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: compact into slice-local regions; position/count
                 * record the final cursor values for the master to merge. */
                thread_state[omp_thread_num].state.position = (fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_start + omp_block_size;
                thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size;
                libsais_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &thread_state[omp_thread_num].state.position, &thread_state[omp_thread_num].state.count, omp_block_start, omp_block_size);
            }
            #pragma omp barrier
            #pragma omp master
            {
                /* Phase 2: concatenate per-thread outputs right-to-left. */
                fast_sint_t t, position;
                for (position = m, t = omp_num_threads - 1; t >= 0; --t)
                {
                    fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1);
                    fast_sint_t count = ((fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_end - thread_state[t].state.position);
                    if (count > 0)
                    {
                        position -= count; memcpy(&SA[position], &SA[thread_state[t].state.position], (size_t)count * sizeof(sa_sint_t));
                    }
                }
                for (position = (fast_sint_t)n + (fast_sint_t)fs, t = omp_num_threads - 1; t >= 0; --t)
                {
                    fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1);
                    fast_sint_t count = ((fast_sint_t)m + omp_block_end - thread_state[t].state.count);
                    if (count > 0)
                    {
                        position -= count; memcpy(&SA[position], &SA[thread_state[t].state.count], (size_t)count * sizeof(sa_sint_t));
                    }
                }
            }
        }
#endif
    }
    /* Move the f unique entries to the tail of the workspace. */
    memcpy(&SA[(fast_sint_t)n + (fast_sint_t)fs - (fast_sint_t)m], &SA[(fast_sint_t)m - (fast_sint_t)f], (size_t)f * sizeof(sa_sint_t));
}
/* Renumbers LMS suffixes and compacts them in SA; returns f, the number
 * of unique LMS suffixes found by the renumbering pass. */
static sa_sint_t libsais_compact_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    sa_sint_t f = libsais_renumber_unique_and_nonunique_lms_suffixes_32s_omp(T, SA, m, threads, thread_state);
    libsais_compact_unique_and_nonunique_lms_suffixes_32s_omp(SA, n, m, fs, f, threads, thread_state);
    return f;
}
/* Scans T for positions whose sign bit is set (unique LMS suffixes
 * marked by the renumbering pass), clears the mark, and stores each such
 * position into SA at the slot given by the saved ranks SAnm (read
 * sequentially starting at offset l). Note the extra `i++` inside each
 * hit: after a marked position the very next position cannot be marked,
 * so it is skipped — this is why the unrolled loop reserves a margin of
 * 6 rather than 3 and why i may advance past j slightly. */
static void libsais_merge_unique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l];
    sa_sint_t i, j; fast_sint_t tmp = *SAnm++;
    for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 6; i < j; i += 4)
    {
        libsais_prefetch(&T[i + prefetch_distance]);
        sa_sint_t c0 = T[i + 0]; if (c0 < 0) { T[i + 0] = c0 & SAINT_MAX; SA[tmp] = i + 0; i++; tmp = *SAnm++; }
        sa_sint_t c1 = T[i + 1]; if (c1 < 0) { T[i + 1] = c1 & SAINT_MAX; SA[tmp] = i + 1; i++; tmp = *SAnm++; }
        sa_sint_t c2 = T[i + 2]; if (c2 < 0) { T[i + 2] = c2 & SAINT_MAX; SA[tmp] = i + 2; i++; tmp = *SAnm++; }
        sa_sint_t c3 = T[i + 3]; if (c3 < 0) { T[i + 3] = c3 & SAINT_MAX; SA[tmp] = i + 3; i++; tmp = *SAnm++; }
    }
    /* Remainder loop. */
    for (j += 6; i < j; i += 1)
    {
        sa_sint_t c = T[i]; if (c < 0) { T[i] = c & SAINT_MAX; SA[tmp] = i; i++; tmp = *SAnm++; }
    }
}
/* Fills the zero slots of SA[omp_block_start .. +omp_block_size) with the
 * non-unique LMS suffix values saved in the tail region SAnm (read
 * sequentially starting at offset l). Zero slots were left by the
 * compaction pass; non-zero slots are preserved. */
static void libsais_merge_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    const fast_sint_t prefetch_distance = 32;
    const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l];
    fast_sint_t i, j; sa_sint_t tmp = *SAnm++;
    for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4)
    {
        libsais_prefetch(&SA[i + prefetch_distance]);
        if (SA[i + 0] == 0) { SA[i + 0] = tmp; tmp = *SAnm++; }
        if (SA[i + 1] == 0) { SA[i + 1] = tmp; tmp = *SAnm++; }
        if (SA[i + 2] == 0) { SA[i + 2] = tmp; tmp = *SAnm++; }
        if (SA[i + 3] == 0) { SA[i + 3] = tmp; tmp = *SAnm++; }
    }
    /* Remainder loop. */
    for (j += 3; i < j; i += 1)
    {
        if (SA[i] == 0) { SA[i] = tmp; tmp = *SAnm++; }
    }
}
/* Parallel driver for merging unique LMS suffixes. Multi-threaded path
 * first counts the sign-marked positions per slice of T, then (after a
 * barrier) each thread merges its slice starting at the prefix sum of
 * earlier slices' counts. */
static void libsais_merge_unique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
        if (omp_num_threads == 1)
        {
            libsais_merge_unique_lms_suffixes_32s(T, SA, n, m, 0, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: count sign-marked positions in this slice of T. */
                thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(T, omp_block_start, omp_block_size);
            }
            #pragma omp barrier
            {
                /* Phase 2: merge, offset by the counts of earlier slices. */
                fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
                libsais_merge_unique_lms_suffixes_32s(T, SA, n, m, count, omp_block_start, omp_block_size);
            }
        }
#endif
    }
}
/* Parallel driver for merging non-unique LMS suffixes. Like the unique
 * variant, but slices cover SA[0..m) and the offset starts at f (the
 * unique entries precede the non-unique ones in the saved tail region);
 * per-slice zero-slot counts provide the prefix offsets. */
static void libsais_merge_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536)
#endif
    {
#if defined(_OPENMP)
        fast_sint_t omp_thread_num = omp_get_thread_num();
        fast_sint_t omp_num_threads = omp_get_num_threads();
#else
        UNUSED(threads); UNUSED(thread_state);
        fast_sint_t omp_thread_num = 0;
        fast_sint_t omp_num_threads = 1;
#endif
        fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16);
        fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
        fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start;
        if (omp_num_threads == 1)
        {
            libsais_merge_nonunique_lms_suffixes_32s(SA, n, m, f, omp_block_start, omp_block_size);
        }
#if defined(_OPENMP)
        else
        {
            {
                /* Phase 1: count zero slots in this slice. */
                thread_state[omp_thread_num].state.count = libsais_count_zero_marked_suffixes(SA, omp_block_start, omp_block_size);
            }
            #pragma omp barrier
            {
                /* Phase 2: merge, offset by f plus earlier slices' counts. */
                fast_sint_t t, count = f; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; }
                libsais_merge_nonunique_lms_suffixes_32s(SA, n, m, count, omp_block_start, omp_block_size);
            }
        }
#endif
    }
}
/* Merges both halves of the compacted LMS suffixes back into SA:
 * first the f unique suffixes (marked in T), then the non-unique ones
 * (zero slots in SA). */
static void libsais_merge_compacted_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    libsais_merge_unique_lms_suffixes_32s_omp(T, SA, n, m, threads, thread_state);
    libsais_merge_nonunique_lms_suffixes_32s_omp(SA, n, m, f, threads, thread_state);
}
/* Rebuilds the full set of LMS suffixes after the recursive call, for the
 * 2k-bucket layout. When f > 0 some suffixes were compacted out: the f
 * unique entries are moved back next to the recursion output, the
 * remaining m - f are re-gathered and reconstructed, and the two sets
 * are merged. When f == 0 no compaction happened and a plain
 * gather + reconstruct suffices. */
static void libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (f > 0)
    {
        memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t));
        libsais_count_and_gather_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
        libsais_reconstruct_lms_suffixes_omp(SA, n, m - f, threads);
        memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t));
        memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t));
        libsais_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state);
    }
    else
    {
        libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n);
        libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads);
    }
}
/* Same reconstruction as the 2k variant, but for the 1k layout: no bucket
 * table is maintained here, so the gather steps use the bucket-free
 * routines. */
static void libsais_reconstruct_compacted_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (f > 0)
    {
        memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t));
        libsais_gather_compacted_lms_suffixes_32s(T, SA, n);
        libsais_reconstruct_lms_suffixes_omp(SA, n, m - f, threads);
        memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t));
        memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t));
        libsais_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state);
    }
    else
    {
        libsais_gather_lms_suffixes_32s(T, SA, n);
        libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads);
    }
}
/* Core recursive SA-IS routine for 32-bit alphabets.
 *
 * T       text of length n over alphabet [0, k); may be temporarily
 *         sign-marked by sub-passes but is restored.
 * SA      output / workspace array of n + fs entries.
 * fs      extra free space available past SA[n].
 * Returns 0 on success, -2 on allocation failure (directly or from a
 * recursive call).
 *
 * Strategy is chosen by how many bucket tables of size k fit into the
 * free space: 6k (fastest), 4k, 2k, or the minimal 1k path that may
 * heap-allocate a bucket table. In every path: gather LMS suffixes,
 * radix-sort them, induce a partial order, assign names; if names are
 * not all distinct (names < m), compact and recurse on the reduced
 * problem, then reconstruct; finally place the sorted LMS suffixes and
 * run the full induction to produce the final order. */
static sa_sint_t libsais_main_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
    if (k > 0 && fs / k >= 6)
    {
        /* 6k path: bucket table carved from the tail of SA's free space,
         * aligned to 1024 or 16 entries depending on available slack. */
        sa_sint_t alignment = (fs - 1024) / k >= 6 ? 1024 : 16;
        sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 6 ? (sa_sint_t *)libsais_align_up(&SA[n + fs - 6 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 6 * k];
        sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state);
        if (m > 1)
        {
            memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t));
            sa_sint_t first_lms_suffix = SA[n - m];
            sa_sint_t left_suffixes_count = libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(T, k, buckets, first_lms_suffix);
            libsais_radix_sort_lms_suffixes_32s_6k_omp(T, SA, n, m, &buckets[4 * k], threads, thread_state);
            libsais_radix_sort_set_markers_32s_6k_omp(SA, k, &buckets[4 * k], threads);
            if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); }
            libsais_initialize_buckets_for_partial_sorting_32s_6k(T, k, buckets, first_lms_suffix, left_suffixes_count);
            libsais_induce_partial_order_32s_6k_omp(T, SA, n, k, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state);
            sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state);
            if (names < m)
            {
                /* Duplicate names: compact, recurse on the reduced string,
                 * then reconstruct the original LMS suffixes. */
                sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
                if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
                {
                    return -2;
                }
                libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state);
            }
            else
            {
                libsais_count_lms_suffixes_32s_2k(T, n, k, buckets);
            }
            libsais_initialize_buckets_start_and_end_32s_4k(k, buckets);
            libsais_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets);
            libsais_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state);
        }
        else
        {
            /* At most one LMS suffix: no sorting needed before induction. */
            SA[0] = SA[n - 1];
            libsais_initialize_buckets_start_and_end_32s_6k(k, buckets);
            libsais_place_lms_suffixes_histogram_32s_6k(SA, n, k, m, buckets);
            libsais_induce_final_order_32s_6k(T, SA, n, k, buckets, threads, thread_state);
        }
        return 0;
    }
    else if (k > 0 && fs / k >= 4)
    {
        /* 4k path. */
        sa_sint_t alignment = (fs - 1024) / k >= 4 ? 1024 : 16;
        sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 4 ? (sa_sint_t *)libsais_align_up(&SA[n + fs - 4 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 4 * k];
        sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
        if (m > 1)
        {
            libsais_initialize_buckets_for_radix_and_partial_sorting_32s_4k(T, k, buckets, SA[n - m]);
            libsais_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state);
            libsais_radix_sort_set_markers_32s_4k_omp(SA, k, &buckets[1], threads);
            libsais_place_lms_suffixes_interval_32s_4k(SA, n, k, m - 1, buckets);
            libsais_induce_partial_order_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state);
            sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state);
            if (names < m)
            {
                sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
                if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
                {
                    return -2;
                }
                libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state);
            }
            else
            {
                libsais_count_lms_suffixes_32s_2k(T, n, k, buckets);
            }
        }
        else
        {
            SA[0] = SA[n - 1];
        }
        libsais_initialize_buckets_start_and_end_32s_4k(k, buckets);
        libsais_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets);
        libsais_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state);
        return 0;
    }
    else if (k > 0 && fs / k >= 2)
    {
        /* 2k path. */
        sa_sint_t alignment = (fs - 1024) / k >= 2 ? 1024 : 16;
        sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 2 ? (sa_sint_t *)libsais_align_up(&SA[n + fs - 2 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 2 * k];
        sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
        if (m > 1)
        {
            libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(T, k, buckets, SA[n - m]);
            libsais_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state);
            libsais_place_lms_suffixes_interval_32s_2k(SA, n, k, m - 1, buckets);
            libsais_initialize_buckets_start_and_end_32s_2k(k, buckets);
            libsais_induce_partial_order_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state);
            sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads);
            if (names < m)
            {
                sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
                if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
                {
                    return -2;
                }
                libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state);
            }
            else
            {
                libsais_count_lms_suffixes_32s_2k(T, n, k, buckets);
            }
        }
        else
        {
            SA[0] = SA[n - 1];
        }
        libsais_initialize_buckets_end_32s_2k(k, buckets);
        libsais_place_lms_suffixes_histogram_32s_2k(SA, n, k, m, buckets);
        libsais_initialize_buckets_start_and_end_32s_2k(k, buckets);
        libsais_induce_final_order_32s_2k(T, SA, n, k, buckets, threads, thread_state);
        return 0;
    }
    else
    {
        /* 1k path: bucket table goes into free space if it fits, otherwise
         * into a heap allocation that is freed (and possibly re-allocated
         * after the recursion reclaims SA's space). */
        sa_sint_t * buffer = fs < k ? (sa_sint_t *)libsais_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096) : (sa_sint_t *)NULL;
        sa_sint_t alignment = fs - 1024 >= k ? 1024 : 16;
        sa_sint_t * RESTRICT buckets = fs - alignment >= k ? (sa_sint_t *)libsais_align_up(&SA[n + fs - k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : fs >= k ? &SA[n + fs - k] : buffer;
        if (buckets == NULL) { return -2; }
        memset(SA, 0, (size_t)n * sizeof(sa_sint_t));
        libsais_count_suffixes_32s(T, n, k, buckets);
        libsais_initialize_buckets_end_32s_1k(k, buckets);
        sa_sint_t m = libsais_radix_sort_lms_suffixes_32s_1k(T, SA, n, buckets);
        if (m > 1)
        {
            libsais_induce_partial_order_32s_1k_omp(T, SA, n, k, buckets, threads, thread_state);
            sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads);
            if (names < m)
            {
                /* Release the heap buffer before recursing (the recursion
                 * needs the memory); re-acquire it afterwards if required. */
                if (buffer != NULL) { libsais_free_aligned(buffer); buckets = NULL; }
                sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state);
                if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0)
                {
                    return -2;
                }
                libsais_reconstruct_compacted_lms_suffixes_32s_1k_omp(T, SA, n, m, fs, f, threads, thread_state);
                if (buckets == NULL) { buckets = buffer = (sa_sint_t *)libsais_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096); }
                if (buckets == NULL) { return -2; }
            }
            libsais_count_suffixes_32s(T, n, k, buckets);
            libsais_initialize_buckets_end_32s_1k(k, buckets);
            libsais_place_lms_suffixes_interval_32s_1k(T, SA, k, m, buckets);
        }
        libsais_induce_final_order_32s_1k(T, SA, n, k, buckets, threads, thread_state);
        libsais_free_aligned(buffer);
        return 0;
    }
}
/* Public entry point for the 32-bit recursion: allocates per-thread
 * scratch state when more than one thread is requested, runs the core
 * routine, and releases the state. Returns the core routine's result,
 * or -2 when the thread-state allocation fails. */
int32_t libsais_main_32s_internal(int32_t * T, int32_t * SA, int32_t n, int32_t k, int32_t fs, int32_t threads)
{
    LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL;
    sa_sint_t index;
    if (thread_state == NULL && threads != 1)
    {
        /* Multi-threaded run requested but scratch allocation failed. */
        index = -2;
    }
    else
    {
        index = libsais_main_32s(T, SA, n, k, fs, threads, thread_state);
    }
    libsais_free_thread_state(thread_state);
    return index;
}
/* Suffix array construction for 8-bit input (top level of the SA-IS scheme).
   T: input text of length n; SA: output array (with fs extra workspace entries);
   buckets: caller-provided scratch of 8 * ALPHABET_SIZE entries;
   bwt/r/I: BWT mode flag, auxiliary-index sampling rate and output array
   (forwarded to the final induced sort — see callers for their semantics).
   Returns the value of libsais_induce_final_order_8u_omp, or -2 if the
   recursive reduced-problem sort fails. */
static sa_sint_t libsais_main_8u(const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state)
{
/* m = number of LMS suffixes gathered at the tail of SA. */
sa_sint_t m = libsais_count_and_gather_lms_suffixes_8u_omp(T, SA, n, buckets, threads, thread_state);
libsais_initialize_buckets_start_and_end_8u(buckets);
if (m > 0)
{
sa_sint_t first_lms_suffix = SA[n - m];
sa_sint_t left_suffixes_count = libsais_initialize_buckets_for_lms_suffixes_radix_sort_8u(T, buckets, first_lms_suffix);
/* Multi-threaded paths require the unused part of SA zeroed before/after
   the radix sort (only worth doing for inputs >= 64K). */
if (threads > 1 && n >= 65536) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); }
libsais_radix_sort_lms_suffixes_8u_omp(T, SA, n, m, buckets, threads, thread_state);
if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); }
libsais_initialize_buckets_for_partial_sorting_8u(T, buckets, first_lms_suffix, left_suffixes_count);
libsais_induce_partial_order_8u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state);
/* names < m means some LMS substrings are identical, so the reduced
   problem must be sorted recursively with the 32-bit solver. */
sa_sint_t names = libsais_renumber_and_gather_lms_suffixes_8u_omp(SA, n, m, fs, threads, thread_state);
if (names < m)
{
if (libsais_main_32s(SA + n + fs - m, SA, m, names, fs + n - 2 * m, threads, thread_state) != 0)
{
return -2; /* recursion failed (e.g. allocation error) */
}
libsais_gather_lms_suffixes_8u_omp(T, SA, n, threads, thread_state);
libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads);
}
libsais_place_lms_suffixes_interval_8u(SA, n, m, buckets);
}
else
{
/* No LMS suffixes: induced sorting starts from a cleared SA. */
memset(SA, 0, (size_t)n * sizeof(sa_sint_t));
}
/* Induce the full suffix order from the now-sorted LMS suffixes. */
return libsais_induce_final_order_8u_omp(T, SA, n, bwt, r, I, buckets, threads, thread_state);
}
/* Allocates the bucket scratch (8 * ALPHABET_SIZE entries) plus optional
   per-thread state, runs the 8-bit construction, and releases everything.
   Returns libsais_main_8u's result, or -2 on allocation failure. */
static sa_sint_t libsais_main(const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t threads)
{
    LIBSAIS_THREAD_STATE * RESTRICT state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL;
    sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
    sa_sint_t result = -2;
    if (buckets != NULL && (state != NULL || threads == 1))
    {
        result = libsais_main_8u(T, SA, n, buckets, bwt, r, I, fs, threads, state);
    }
    libsais_free_aligned(buckets);
    libsais_free_thread_state(state);
    return result;
}
/* Same as libsais_main but reuses the buckets/thread state stored in a
   previously created context. Returns -2 if the context is unusable. */
static sa_sint_t libsais_main_ctx(const LIBSAIS_CONTEXT * ctx, const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs)
{
    if (ctx == NULL || ctx->buckets == NULL || (ctx->thread_state == NULL && ctx->threads != 1))
    {
        return -2;
    }
    return libsais_main_8u(T, SA, n, ctx->buckets, bwt, r, I, fs, (sa_sint_t)ctx->threads, ctx->thread_state);
}
/* Narrowing copy A[0..n) (32-bit values, each known to fit a byte in BWT
   output) into U[0..n). Unrolled eight at a time with software prefetch;
   the scalar tail handles the final n % 8 elements. */
static void libsais_bwt_copy_8u(uint8_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n)
{
    const fast_sint_t prefetch_distance = 32;
    fast_sint_t p = 0;
    fast_sint_t unrolled_end = (fast_sint_t)n - 7;
    while (p < unrolled_end)
    {
        libsais_prefetch(&A[p + prefetch_distance]);
        U[p + 0] = (uint8_t)A[p + 0];
        U[p + 1] = (uint8_t)A[p + 1];
        U[p + 2] = (uint8_t)A[p + 2];
        U[p + 3] = (uint8_t)A[p + 3];
        U[p + 4] = (uint8_t)A[p + 4];
        U[p + 5] = (uint8_t)A[p + 5];
        U[p + 6] = (uint8_t)A[p + 6];
        U[p + 7] = (uint8_t)A[p + 7];
        p += 8;
    }
    for (unrolled_end += 7; p < unrolled_end; ++p)
    {
        U[p] = (uint8_t)A[p];
    }
}
#if defined(_OPENMP)
/* Parallel wrapper over libsais_bwt_copy_8u: splits [0, n) into per-thread
   chunks aligned to 16 elements; the last thread takes the remainder.
   Parallelism only kicks in for threads > 1 and n >= 64K.
   NOTE: the enclosing #if (_OPENMP) at file scope already guards this
   definition; the inner #if/#else is kept for symmetry with sibling code. */
static void libsais_bwt_copy_8u_omp(uint8_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n, sa_sint_t threads)
{
#if defined(_OPENMP)
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
/* Chunk size rounded down to a multiple of 16. */
fast_sint_t omp_block_stride = ((fast_sint_t)n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)n - omp_block_start;
#else
UNUSED(threads);
fast_sint_t omp_block_start = 0;
fast_sint_t omp_block_size = (fast_sint_t)n;
#endif
libsais_bwt_copy_8u(U + omp_block_start, A + omp_block_start, (sa_sint_t)omp_block_size);
}
}
#endif
/* Creates a single-threaded libsais context (opaque pointer for the _ctx APIs). */
void * libsais_create_ctx(void)
{
    return (void *)libsais_create_ctx_main(1);
}
void libsais_free_ctx(void * ctx)
{
libsais_free_ctx_main((LIBSAIS_CONTEXT *)ctx);
}
/* Computes the suffix array of T[0..n) into SA (fs extra workspace entries).
   Returns 0 on success, -1 on invalid arguments, -2 on internal failure.
   Inputs of length 0 or 1 are handled directly. */
int32_t libsais(const uint8_t * T, int32_t * SA, int32_t n, int32_t fs)
{
    if (T == NULL || SA == NULL || n < 0 || fs < 0)
    {
        return -1;
    }
    if (n < 2)
    {
        if (n == 1) { SA[0] = 0; }
        return 0;
    }
    return libsais_main(T, SA, n, 0, 0, NULL, fs, 1);
}
/* Context-based variant of libsais: same contract, but reuses the scratch
   buffers held by ctx instead of allocating per call. */
int32_t libsais_ctx(const void * ctx, const uint8_t * T, int32_t * SA, int32_t n, int32_t fs)
{
    if (ctx == NULL || T == NULL || SA == NULL || n < 0 || fs < 0)
    {
        return -1;
    }
    if (n < 2)
    {
        if (n == 1) { SA[0] = 0; }
        return 0;
    }
    return libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, SA, n, 0, 0, NULL, fs);
}
/* Computes the Burrows-Wheeler transform of T[0..n) into U, using A as
   temporary space. Returns the primary index (>= 1) on success, n for
   trivial inputs, -1 on bad arguments, -2 on internal failure. */
int32_t libsais_bwt(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs)
{
    if (T == NULL || U == NULL || A == NULL || n < 0 || fs < 0)
    {
        return -1;
    }
    if (n <= 1)
    {
        if (n == 1) { U[0] = T[0]; }
        return n;
    }
    sa_sint_t result = libsais_main(T, A, n, 1, 0, NULL, fs, 1);
    if (result < 0)
    {
        return result; /* propagate -2 */
    }
    /* Assemble U: last text byte first, then the BWT split at the primary index. */
    result += 1;
    U[0] = T[n - 1];
    libsais_bwt_copy_8u(U + 1, A, result - 1);
    libsais_bwt_copy_8u(U + result, A + result, n - result);
    return result;
}
/* BWT with auxiliary indexes: every r-th index (r a power of two, >= 2) is
   written to I instead of returning a single primary index.
   Returns 0 on success, -1 on bad arguments, -2 on internal failure. */
int32_t libsais_bwt_aux(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t r, int32_t * I)
{
    int32_t r_is_pow2 = (r >= 2) && ((r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || n < 0 || fs < 0 || !r_is_pow2 || I == NULL)
    {
        return -1;
    }
    if (n <= 1)
    {
        if (n == 1) { U[0] = T[0]; }
        I[0] = n;
        return 0;
    }
    if (libsais_main(T, A, n, 1, r, I, fs, 1) != 0)
    {
        return -2;
    }
    /* Assemble U around the primary index stored in I[0]. */
    U[0] = T[n - 1];
    libsais_bwt_copy_8u(U + 1, A, I[0] - 1);
    libsais_bwt_copy_8u(U + I[0], A + I[0], n - I[0]);
    return 0;
}
/* Context-based BWT: identical contract to libsais_bwt, but reuses the
   scratch buffers in ctx and (when built with OpenMP) parallelizes the
   final byte copies using the context's thread count.
   Returns primary index (>= 1) on success, n for trivial inputs,
   -1 on bad arguments, -2 on internal failure. */
int32_t libsais_bwt_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs)
{
if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0))
{
return -1;
}
else if (n <= 1)
{
if (n == 1) { U[0] = T[0]; }
return n;
}
sa_sint_t index = libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, 0, NULL, fs);
if (index >= 0)
{
/* index is 0-based from the core; public API reports it 1-based. */
index++;
U[0] = T[n - 1];
#if defined(_OPENMP)
libsais_bwt_copy_8u_omp(U + 1, A, index - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
libsais_bwt_copy_8u_omp(U + index, A + index, n - index, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
#else
libsais_bwt_copy_8u(U + 1, A, index - 1);
libsais_bwt_copy_8u(U + index, A + index, n - index);
#endif
}
return index;
}
/* Context-based BWT with auxiliary indexes (see libsais_bwt_aux): r must be
   a power of two >= 2; sampled indexes are stored in I. Uses the context's
   scratch buffers and, under OpenMP, its thread count for the output copies.
   Returns 0 on success, -1 on bad arguments, -2 on internal failure. */
int32_t libsais_bwt_aux_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t r, int32_t * I)
{
if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL))
{
return -1;
}
else if (n <= 1)
{
if (n == 1) { U[0] = T[0]; }
I[0] = n;
return 0;
}
if (libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, r, I, fs) != 0)
{
return -2;
}
/* I[0] holds the primary index; U is assembled around it. */
U[0] = T[n - 1];
#if defined(_OPENMP)
libsais_bwt_copy_8u_omp(U + 1, A, I[0] - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
libsais_bwt_copy_8u_omp(U + I[0], A + I[0], n - I[0], (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads);
#else
libsais_bwt_copy_8u(U + 1, A, I[0] - 1);
libsais_bwt_copy_8u(U + I[0], A + I[0], n - I[0]);
#endif
return 0;
}
#if defined(_OPENMP)
/* Creates a context for multi-threaded use; threads == 0 means "use the
   OpenMP default". Returns NULL for a negative thread count or on failure. */
void * libsais_create_ctx_omp(int32_t threads)
{
    if (threads < 0)
    {
        return NULL;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    return (void *)libsais_create_ctx_main(threads);
}
/* Multi-threaded suffix array construction; threads == 0 selects the OpenMP
   default. Returns 0 on success, -1 on bad arguments, -2 on failure. */
int32_t libsais_omp(const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t threads)
{
    if (T == NULL || SA == NULL || n < 0 || fs < 0 || threads < 0)
    {
        return -1;
    }
    if (n < 2)
    {
        if (n == 1) { SA[0] = 0; }
        return 0;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    return libsais_main(T, SA, n, 0, 0, NULL, fs, threads);
}
/* Multi-threaded BWT; threads == 0 selects the OpenMP default.
   Returns the primary index (>= 1), n for trivial inputs, -1 on bad
   arguments, -2 on internal failure. */
int32_t libsais_bwt_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t threads)
{
    if (T == NULL || U == NULL || A == NULL || n < 0 || fs < 0 || threads < 0)
    {
        return -1;
    }
    if (n <= 1)
    {
        if (n == 1) { U[0] = T[0]; }
        return n;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    sa_sint_t result = libsais_main(T, A, n, 1, 0, NULL, fs, threads);
    if (result < 0)
    {
        return result; /* propagate -2 */
    }
    result += 1;
    U[0] = T[n - 1];
    libsais_bwt_copy_8u_omp(U + 1, A, result - 1, threads);
    libsais_bwt_copy_8u_omp(U + result, A + result, n - result, threads);
    return result;
}
/* Multi-threaded BWT with auxiliary indexes; r must be a power of two >= 2,
   threads == 0 selects the OpenMP default. Sampled indexes go to I.
   Returns 0 on success, -1 on bad arguments, -2 on internal failure. */
int32_t libsais_bwt_aux_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t r, int32_t * I, int32_t threads)
{
    int32_t r_is_pow2 = (r >= 2) && ((r & (r - 1)) == 0);
    if (T == NULL || U == NULL || A == NULL || n < 0 || fs < 0 || !r_is_pow2 || I == NULL || threads < 0)
    {
        return -1;
    }
    if (n <= 1)
    {
        if (n == 1) { U[0] = T[0]; }
        I[0] = n;
        return 0;
    }
    if (threads == 0) { threads = omp_get_max_threads(); }
    if (libsais_main(T, A, n, 1, r, I, fs, threads) != 0)
    {
        return -2;
    }
    U[0] = T[n - 1];
    libsais_bwt_copy_8u_omp(U + 1, A, I[0] - 1, threads);
    libsais_bwt_copy_8u_omp(U + I[0], A + I[0], n - I[0], threads);
    return 0;
}
#endif
/* Allocates an inverse-BWT context: the bigram bucket table, the fastbits
   lookup table, and (for multi-threaded use) per-thread bucket scratch.
   Returns NULL if any required allocation fails; partial allocations are
   released before returning. */
static LIBSAIS_UNBWT_CONTEXT * libsais_unbwt_create_ctx_main(sa_sint_t threads)
{
    LIBSAIS_UNBWT_CONTEXT * RESTRICT ctx = (LIBSAIS_UNBWT_CONTEXT *)libsais_alloc_aligned(sizeof(LIBSAIS_UNBWT_CONTEXT), 64);
    sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais_alloc_aligned(ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t), 4096);
    uint16_t * RESTRICT fastbits = (uint16_t *)libsais_alloc_aligned((1 + (1 << UNBWT_FASTBITS)) * sizeof(uint16_t), 4096);
    sa_uint_t * RESTRICT buckets = threads > 1 ? (sa_uint_t *)libsais_alloc_aligned((size_t)threads * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) * sizeof(sa_uint_t), 4096) : NULL;
    int ok = (ctx != NULL) && (bucket2 != NULL) && (fastbits != NULL) && (buckets != NULL || threads == 1);
    if (!ok)
    {
        /* Roll back whatever succeeded (free_aligned accepts NULL). */
        libsais_free_aligned(buckets);
        libsais_free_aligned(fastbits);
        libsais_free_aligned(bucket2);
        libsais_free_aligned(ctx);
        return NULL;
    }
    ctx->bucket2 = bucket2;
    ctx->fastbits = fastbits;
    ctx->buckets = buckets;
    ctx->threads = threads;
    return ctx;
}
/* Releases an inverse-BWT context and all buffers it owns; NULL is a no-op. */
static void libsais_unbwt_free_ctx_main(LIBSAIS_UNBWT_CONTEXT * ctx)
{
    if (ctx == NULL)
    {
        return;
    }
    libsais_free_aligned(ctx->buckets);
    libsais_free_aligned(ctx->fastbits);
    libsais_free_aligned(ctx->bucket2);
    libsais_free_aligned(ctx);
}
/* Accumulates a byte histogram of T[0..n) into count[0..ALPHABET_SIZE).
   For n >= 1024 it uses four interleaved counter arrays (copy0..copy3,
   each padded by 16 entries) to shorten store-to-load dependency chains,
   reads the text as 32-bit words 64 bytes per iteration, and merges the
   four copies into count at the end; smaller inputs fall through to the
   plain byte loop at the bottom.
   NOTE(review): the uint32_t loads through casted pointers rely on
   byte-order/alignment behavior of the target (little-endian assumed for
   the (uint8_t)x low-byte extraction) and technically violate strict
   aliasing — pre-existing, left untouched. */
static void libsais_unbwt_compute_histogram(const uint8_t * RESTRICT T, fast_sint_t n, sa_uint_t * RESTRICT count)
{
const fast_sint_t prefetch_distance = 256;
const uint8_t * RESTRICT T_p = T;
if (n >= 1024)
{
sa_uint_t copy[4 * (ALPHABET_SIZE + 16)];
memset(copy, 0, 4 * (ALPHABET_SIZE + 16) * sizeof(sa_uint_t));
sa_uint_t * RESTRICT copy0 = copy + 0 * (ALPHABET_SIZE + 16);
sa_uint_t * RESTRICT copy1 = copy + 1 * (ALPHABET_SIZE + 16);
sa_uint_t * RESTRICT copy2 = copy + 2 * (ALPHABET_SIZE + 16);
sa_uint_t * RESTRICT copy3 = copy + 3 * (ALPHABET_SIZE + 16);
/* Consume bytes one at a time until T_p reaches a 64-byte boundary. */
for (; T_p < (uint8_t * )((ptrdiff_t)(T + 63) & (-64)); T_p += 1) { copy0[T_p[0]]++; }
/* Software-pipelined main loop: x/y/z/w hold 32-bit words loaded one
   iteration ahead of their use; 64 bytes are counted per pass. */
fast_uint_t x = ((const uint32_t *)(const void *)T_p)[0], y = ((const uint32_t *)(const void *)T_p)[1];
for (; T_p < (uint8_t * )((ptrdiff_t)(T + n - 8) & (-64)); T_p += 64)
{
libsais_prefetch(&T_p[prefetch_distance]);
fast_uint_t z = ((const uint32_t *)(const void *)T_p)[2], w = ((const uint32_t *)(const void *)T_p)[3];
copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++;
copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++;
x = ((const uint32_t *)(const void *)T_p)[4]; y = ((const uint32_t *)(const void *)T_p)[5];
copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++;
copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++;
z = ((const uint32_t *)(const void *)T_p)[6]; w = ((const uint32_t *)(const void *)T_p)[7];
copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++;
copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++;
x = ((const uint32_t *)(const void *)T_p)[8]; y = ((const uint32_t *)(const void *)T_p)[9];
copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++;
copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++;
z = ((const uint32_t *)(const void *)T_p)[10]; w = ((const uint32_t *)(const void *)T_p)[11];
copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++;
copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++;
x = ((const uint32_t *)(const void *)T_p)[12]; y = ((const uint32_t *)(const void *)T_p)[13];
copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++;
copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++;
z = ((const uint32_t *)(const void *)T_p)[14]; w = ((const uint32_t *)(const void *)T_p)[15];
copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++;
copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++;
x = ((const uint32_t *)(const void *)T_p)[16]; y = ((const uint32_t *)(const void *)T_p)[17];
copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++;
copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++;
}
/* Drain the two words left in flight, then merge the four counter copies. */
copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++;
copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++;
T_p += 8;
fast_uint_t i; for (i = 0; i < ALPHABET_SIZE; i++) { count[i] += copy0[i] + copy1[i] + copy2[i] + copy3[i]; }
}
/* Scalar tail (and the whole loop for n < 1024). */
for (; T_p < T + n; T_p += 1) { count[T_p[0]]++; }
}
/* In-place transpose of the 256x256 bigram bucket matrix (bucket2 indexed
   as [row << 8 | col]). Works on 16x16 tiles: diagonal tiles are swapped
   element-wise within themselves, and each off-diagonal tile pair is
   swapped with its mirror using a 16-element unrolled column exchange. */
static void libsais_unbwt_transpose_bucket2(sa_uint_t * RESTRICT bucket2)
{
fast_uint_t x, y, c, d;
for (x = 0; x != ALPHABET_SIZE; x += 16)
{
/* Diagonal 16x16 tile: swap (c,d) with (d,c) above the diagonal. */
for (c = x; c != x + 16; ++c)
{
for (d = c + 1; d != x + 16; ++d)
{
sa_uint_t tmp = bucket2[(d << 8) + c]; bucket2[(d << 8) + c] = bucket2[(c << 8) + d]; bucket2[(c << 8) + d] = tmp;
}
}
/* Off-diagonal tiles: exchange tile (y,x) with its mirror (x,y),
   one column (16 rows, unrolled) at a time. */
for (y = x + 16; y != ALPHABET_SIZE; y += 16)
{
for (c = x; c != x + 16; ++c)
{
sa_uint_t * bucket2_yc = &bucket2[(y << 8) + c];
sa_uint_t * bucket2_cy = &bucket2[(c << 8) + y];
sa_uint_t tmp00 = bucket2_yc[ 0 * 256]; bucket2_yc[ 0 * 256] = bucket2_cy[ 0]; bucket2_cy[ 0] = tmp00;
sa_uint_t tmp01 = bucket2_yc[ 1 * 256]; bucket2_yc[ 1 * 256] = bucket2_cy[ 1]; bucket2_cy[ 1] = tmp01;
sa_uint_t tmp02 = bucket2_yc[ 2 * 256]; bucket2_yc[ 2 * 256] = bucket2_cy[ 2]; bucket2_cy[ 2] = tmp02;
sa_uint_t tmp03 = bucket2_yc[ 3 * 256]; bucket2_yc[ 3 * 256] = bucket2_cy[ 3]; bucket2_cy[ 3] = tmp03;
sa_uint_t tmp04 = bucket2_yc[ 4 * 256]; bucket2_yc[ 4 * 256] = bucket2_cy[ 4]; bucket2_cy[ 4] = tmp04;
sa_uint_t tmp05 = bucket2_yc[ 5 * 256]; bucket2_yc[ 5 * 256] = bucket2_cy[ 5]; bucket2_cy[ 5] = tmp05;
sa_uint_t tmp06 = bucket2_yc[ 6 * 256]; bucket2_yc[ 6 * 256] = bucket2_cy[ 6]; bucket2_cy[ 6] = tmp06;
sa_uint_t tmp07 = bucket2_yc[ 7 * 256]; bucket2_yc[ 7 * 256] = bucket2_cy[ 7]; bucket2_cy[ 7] = tmp07;
sa_uint_t tmp08 = bucket2_yc[ 8 * 256]; bucket2_yc[ 8 * 256] = bucket2_cy[ 8]; bucket2_cy[ 8] = tmp08;
sa_uint_t tmp09 = bucket2_yc[ 9 * 256]; bucket2_yc[ 9 * 256] = bucket2_cy[ 9]; bucket2_cy[ 9] = tmp09;
sa_uint_t tmp10 = bucket2_yc[10 * 256]; bucket2_yc[10 * 256] = bucket2_cy[10]; bucket2_cy[10] = tmp10;
sa_uint_t tmp11 = bucket2_yc[11 * 256]; bucket2_yc[11 * 256] = bucket2_cy[11]; bucket2_cy[11] = tmp11;
sa_uint_t tmp12 = bucket2_yc[12 * 256]; bucket2_yc[12 * 256] = bucket2_cy[12]; bucket2_cy[12] = tmp12;
sa_uint_t tmp13 = bucket2_yc[13 * 256]; bucket2_yc[13 * 256] = bucket2_cy[13]; bucket2_cy[13] = tmp13;
sa_uint_t tmp14 = bucket2_yc[14 * 256]; bucket2_yc[14 * 256] = bucket2_cy[14]; bucket2_cy[14] = tmp14;
sa_uint_t tmp15 = bucket2_yc[15 * 256]; bucket2_yc[15 * 256] = bucket2_cy[15]; bucket2_cy[15] = tmp15;
}
}
}
}
/* Single-threaded setup of the inverse-BWT bigram histogram.
   Converts bucket1 (per-symbol counts) into exclusive prefix sums starting
   at 1, and for each non-empty symbol range [prev, sum) of the BWT string T
   accumulates a byte histogram of that range into row c of bucket2 — split
   into the part before and the part after the primary index (the 1-position
   shift around `index` accounts for the missing sentinel rotation).
   Finally transposes bucket2 so it is indexed by (following, current) symbol. */
static void libsais_unbwt_compute_bigram_histogram_single(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_uint_t index)
{
fast_uint_t sum, c;
for (sum = 1, c = 0; c < ALPHABET_SIZE; ++c)
{
/* bucket1[c] becomes the starting offset for symbol c (prefix sum from 1). */
fast_uint_t prev = sum; sum += bucket1[c]; bucket1[c] = (sa_uint_t)prev;
if (prev != sum)
{
sa_uint_t * RESTRICT bucket2_p = &bucket2[c << 8];
{
/* Portion of the range before (and up to) the primary index. */
fast_uint_t hi = index; if (sum < hi) { hi = sum; }
libsais_unbwt_compute_histogram(&T[prev], (fast_sint_t)(hi - prev), bucket2_p);
}
{
/* Portion after the primary index, shifted down by one. */
fast_uint_t lo = index + 1; if (prev > lo) { lo = prev; }
libsais_unbwt_compute_histogram(&T[lo - 1], (fast_sint_t)(sum - lo), bucket2_p);
}
}
}
libsais_unbwt_transpose_bucket2(bucket2);
}
/* Turns the bigram counts in bucket2 into exclusive prefix sums (starting
   at 1, with an extra slot inserted at the row of the last text symbol
   `lastc` for the sentinel), and fills the fastbits lookup table: for every
   position p, fastbits[p >> shift] is a bigram index at or below the one
   whose bucket contains p, letting the decoders start their linear scan
   close to the answer. */
static void libsais_unbwt_calculate_fastbits(sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t lastc, fast_uint_t shift)
{
    fast_uint_t slot = 0;
    fast_uint_t w = 0;
    fast_uint_t sum = 1;
    fast_uint_t c;
    for (c = 0; c < ALPHABET_SIZE; ++c)
    {
        if (c == lastc)
        {
            sum += 1; /* reserve the sentinel position */
        }
        fast_uint_t d;
        for (d = 0; d < ALPHABET_SIZE; ++d, ++w)
        {
            fast_uint_t start = sum;
            sum += bucket2[w];
            bucket2[w] = (sa_uint_t)start;
            if (start != sum)
            {
                while (slot <= ((sum - 1) >> shift))
                {
                    fastbits[slot] = (uint16_t)w;
                    ++slot;
                }
            }
        }
    }
}
/* Builds the bigram PSI permutation P for the inverse BWT over the block
   [omp_block_start, omp_block_end): for each BWT position, computes the
   bigram (next symbol, current symbol) and stores the position at the
   next free slot of that bigram's bucket. The work is split into the part
   before the primary index and the part after it (positions past `index`
   are shifted by one to account for the missing sentinel rotation).
   bucket1/bucket2 hold running per-symbol / per-bigram write cursors and
   are advanced in place. */
static void libsais_unbwt_calculate_biPSI(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_uint_t index, fast_sint_t omp_block_start, fast_sint_t omp_block_end)
{
{
/* Positions strictly before the primary index. */
fast_sint_t i = omp_block_start, j = (fast_sint_t)index; if (omp_block_end < j) { j = omp_block_end; }
for (; i < j; ++i)
{
fast_uint_t c = T[i];
fast_uint_t p = bucket1[c]++;
fast_sint_t t = (fast_sint_t)(index - p);
if (t != 0)
{
/* (t >> (bits-1)) is -1 when p > index, 0 otherwise — i.e. read the
   preceding byte for positions past the primary index (arithmetic
   right shift of a negative value; implementation-defined but
   universally sign-propagating on supported targets). */
fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c;
P[bucket2[w]++] = (sa_uint_t)i;
}
}
}
{
/* Positions at or after the primary index (note the i += 1 shift). */
fast_sint_t i = (fast_sint_t)index, j = omp_block_end; if (omp_block_start > i) { i = omp_block_start; }
for (i += 1; i <= j; ++i)
{
fast_uint_t c = T[i - 1];
fast_uint_t p = bucket1[c]++;
fast_sint_t t = (fast_sint_t)(index - p);
if (t != 0)
{
fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c;
P[bucket2[w]++] = (sa_uint_t)i;
}
}
}
}
/* Single-threaded initialization of the inverse-BWT tables for T[0..n):
   builds the symbol histogram, the bigram histogram/prefix sums, the
   fastbits lookup table, and the PSI permutation P. index is the primary
   index (I[0]); lastc the first BWT byte. */
static void libsais_unbwt_init_single(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits)
{
    sa_uint_t bucket1[ALPHABET_SIZE];
    fast_uint_t index = I[0];
    fast_uint_t lastc = T[0];
    /* Choose the smallest shift so n >> shift fits the fastbits table. */
    fast_uint_t shift;
    for (shift = 0; (n >> shift) > (1 << UNBWT_FASTBITS); ++shift) { }
    memset(bucket1, 0, ALPHABET_SIZE * sizeof(sa_uint_t));
    memset(bucket2, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t));
    libsais_unbwt_compute_histogram(T, n, bucket1);
    libsais_unbwt_compute_bigram_histogram_single(T, bucket1, bucket2, index);
    libsais_unbwt_calculate_fastbits(bucket2, fastbits, lastc, shift);
    libsais_unbwt_calculate_biPSI(T, P, bucket1, bucket2, index, 0, n);
}
#if defined(_OPENMP)
/* Per-thread bigram counting for the parallel inverse-BWT setup: walks the
   thread's block of the BWT string, advances the shared-layout per-symbol
   cursors in bucket1, and counts each resulting bigram in bucket2.
   Positions past the primary index read the preceding byte (sign-shift
   trick on t selects an offset of -1 when p > index, 0 otherwise). */
static void libsais_unbwt_compute_bigram_histogram_parallel(const uint8_t * RESTRICT T, fast_uint_t index, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_sint_t omp_block_start, fast_sint_t omp_block_size)
{
    fast_sint_t pos;
    fast_sint_t block_end = omp_block_start + omp_block_size;
    for (pos = omp_block_start; pos < block_end; ++pos)
    {
        fast_uint_t c = T[pos];
        fast_uint_t p = bucket1[c]++;
        fast_sint_t t = (fast_sint_t)(index - p);
        if (t == 0)
        {
            continue; /* the primary-index position contributes no bigram */
        }
        fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c;
        bucket2[w]++;
    }
}
/* Parallel initialization of the inverse-BWT tables. Each thread histograms
   its block into thread-local buffers carved out of `buckets`; barrier-
   separated phases then turn the per-thread counts into global prefix sums
   (bucket1 for symbols, bucket2 for bigrams), compute the fastbits table,
   and let each thread fill its slice of the PSI permutation P.
   Falls back to libsais_unbwt_init_single when OpenMP runs one thread. */
static void libsais_unbwt_init_parallel(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads)
{
sa_uint_t bucket1[ALPHABET_SIZE];
fast_uint_t index = I[0];
fast_uint_t lastc = T[0];
fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }
memset(bucket1, 0, ALPHABET_SIZE * sizeof(sa_uint_t));
memset(bucket2, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t));
#pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536)
{
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
if (omp_num_threads == 1)
{
libsais_unbwt_init_single(T, P, n, I, bucket2, fastbits);
}
else
{
/* Each thread owns ALPHABET_SIZE symbol counters plus a full bigram table. */
sa_uint_t * RESTRICT bucket1_local = buckets + omp_thread_num * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE));
sa_uint_t * RESTRICT bucket2_local = bucket1_local + ALPHABET_SIZE;
fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16);
fast_sint_t omp_block_start = omp_thread_num * omp_block_stride;
fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start;
/* Phase 1: per-thread symbol histogram of this block. */
{
memset(bucket1_local, 0, ALPHABET_SIZE * sizeof(sa_uint_t));
libsais_unbwt_compute_histogram(T + omp_block_start, omp_block_size, bucket1_local);
}
#pragma omp barrier
/* Phase 2 (master): fold per-thread counts into global bucket1,
   leaving each thread's entry as its exclusive offset, then turn
   bucket1 into global prefix sums starting at 1. */
#pragma omp master
{
{
sa_uint_t * RESTRICT bucket1_temp = buckets;
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t, bucket1_temp += ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE))
{
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket1[c], B = bucket1_temp[c]; bucket1[c] = A + B; bucket1_temp[c] = A; }
}
}
{
fast_uint_t sum, c;
for (sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { fast_uint_t prev = sum; sum += bucket1[c]; bucket1[c] = (sa_uint_t)prev; }
}
}
#pragma omp barrier
/* Phase 3: each thread offsets its local cursors by the global prefix
   sums and counts the bigrams of its block. */
{
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket1[c], B = bucket1_local[c]; bucket1_local[c] = A + B; }
memset(bucket2_local, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t));
libsais_unbwt_compute_bigram_histogram_parallel(T, index, bucket1_local, bucket2_local, omp_block_start, omp_block_size);
}
#pragma omp barrier
/* Phase 4: fold per-thread bigram counts into global bucket2 (each
   thread sums a distinct slice), leaving per-thread exclusive offsets. */
{
fast_sint_t omp_bucket2_stride = ((ALPHABET_SIZE * ALPHABET_SIZE) / omp_num_threads) & (-16);
fast_sint_t omp_bucket2_start = omp_thread_num * omp_bucket2_stride;
fast_sint_t omp_bucket2_size = omp_thread_num < omp_num_threads - 1 ? omp_bucket2_stride : (ALPHABET_SIZE * ALPHABET_SIZE) - omp_bucket2_start;
sa_uint_t * RESTRICT bucket2_temp = buckets + ALPHABET_SIZE;
fast_sint_t t;
for (t = 0; t < omp_num_threads; ++t, bucket2_temp += ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE))
{
fast_sint_t c; for (c = omp_bucket2_start; c < omp_bucket2_start + omp_bucket2_size; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_temp[c]; bucket2[c] = A + B; bucket2_temp[c] = A; }
}
}
#pragma omp barrier
/* Phase 5 (master): build fastbits from global bucket2; shift the
   per-thread symbol offsets up one slot so each thread t starts from
   thread t-1's cursor state, and seed thread 0 from global bucket1. */
#pragma omp master
{
libsais_unbwt_calculate_fastbits(bucket2, fastbits, lastc, shift);
{
fast_sint_t t;
for (t = omp_num_threads - 1; t >= 1; --t)
{
sa_uint_t * RESTRICT dst_bucket1 = buckets + t * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE));
sa_uint_t * RESTRICT src_bucket1 = dst_bucket1 - (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE));
memcpy(dst_bucket1, src_bucket1, ALPHABET_SIZE * sizeof(sa_uint_t));
}
memcpy(buckets, bucket1, ALPHABET_SIZE * sizeof(sa_uint_t));
}
}
#pragma omp barrier
/* Phase 6: each thread writes its slice of P using its offset cursors. */
{
fast_sint_t c; for (c = 0; c < ALPHABET_SIZE * ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_local[c]; bucket2_local[c] = A + B; }
libsais_unbwt_calculate_biPSI(T, P, bucket1_local, bucket2_local, index, omp_block_start, omp_block_start + omp_block_size);
}
#pragma omp barrier
/* Phase 7 (master): restore bucket2 to the final cursor state
   (the last thread's local copy reflects all writes). */
#pragma omp master
{
memcpy(bucket2, buckets + ALPHABET_SIZE + (omp_num_threads - 1) * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)), ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t));
}
}
}
}
#endif
/* Decodes k bigrams (2*k output bytes) of one inverse-BWT chain.
   Each step looks up a starting bigram via fastbits, linearly scans
   bucket2 for the bucket containing the current position, follows the PSI
   permutation, and stores the two symbols via a 16-bit byte-swapped write.
   *i0 is the chain cursor, updated on return. */
static void libsais_unbwt_decode_1(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t * i0, fast_uint_t k)
{
    uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
    fast_uint_t step, p0 = *i0;
    for (step = 0; step != k; ++step)
    {
        uint16_t c0 = fastbits[p0 >> shift];
        if (bucket2[c0] <= p0)
        {
            do { c0++; } while (bucket2[c0] <= p0);
        }
        p0 = P[p0];
        U0[step] = libsais_bswap16(c0);
    }
    *i0 = p0;
}
/* Like libsais_unbwt_decode_1 but interleaves two independent chains
   (cursors *i0, *i1) whose outputs are r bytes apart, hiding the
   P[p] load latency of one chain behind the other. */
static void libsais_unbwt_decode_2(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t k)
{
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
fast_uint_t i, p0 = *i0, p1 = *i1;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
}
*i0 = p0; *i1 = p1;
}
/* Three-chain interleaved inverse-BWT decoder (see libsais_unbwt_decode_1);
   outputs are r bytes apart, cursors *i0..*i2 are updated on return. */
static void libsais_unbwt_decode_3(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t k)
{
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r);
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2);
}
*i0 = p0; *i1 = p1; *i2 = p2;
}
/* Four-chain interleaved inverse-BWT decoder (see libsais_unbwt_decode_1);
   outputs are r bytes apart, cursors *i0..*i3 are updated on return. */
static void libsais_unbwt_decode_4(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t k)
{
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r);
uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r);
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2);
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3);
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3;
}
/* Five-chain interleaved inverse-BWT decoder (see libsais_unbwt_decode_1);
   outputs are r bytes apart, cursors *i0..*i4 are updated on return. */
static void libsais_unbwt_decode_5(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t k)
{
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r);
uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r);
uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r);
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2);
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3);
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4);
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4;
}
/* Six-chain interleaved inverse-BWT decoder (see libsais_unbwt_decode_1);
   outputs are r bytes apart, cursors *i0..*i5 are updated on return. */
static void libsais_unbwt_decode_6(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t k)
{
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r);
uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r);
uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r);
uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r);
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2);
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3);
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4);
uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5);
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5;
}
/* Seven-chain interleaved inverse-BWT decoder (see libsais_unbwt_decode_1);
   outputs are r bytes apart, cursors *i0..*i6 are updated on return. */
static void libsais_unbwt_decode_7(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t k)
{
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r);
uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r);
uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r);
uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r);
uint16_t * RESTRICT U6 = (uint16_t *)(void *)(((uint8_t *)U5) + r);
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2);
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3);
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4);
uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5);
uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = libsais_bswap16(c6);
}
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6;
}
// Same interleaved inverse-BWT decode step as libsais_unbwt_decode_7, but
// advancing eight blocks at once; see that function for the per-line logic.
static void libsais_unbwt_decode_8(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t * i7, fast_uint_t k)
{
// Eight output cursors, one per block, spaced r bytes apart.
uint16_t * RESTRICT U0 = (uint16_t *)(void *)U;
uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r);
uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r);
uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r);
uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r);
uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r);
uint16_t * RESTRICT U6 = (uint16_t *)(void *)(((uint8_t *)U5) + r);
uint16_t * RESTRICT U7 = (uint16_t *)(void *)(((uint8_t *)U6) + r);
fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6, p7 = *i7;
for (i = 0; i != k; ++i)
{
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0);
uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1);
uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2);
uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3);
uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4);
uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5);
uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = libsais_bswap16(c6);
uint16_t c7 = fastbits[p7 >> shift]; if (bucket2[c7] <= p7) { do { c7++; } while (bucket2[c7] <= p7); } p7 = P[p7]; U7[i] = libsais_bswap16(c7);
}
// Publish the advanced cursors so the caller can continue from here.
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; *i7 = p7;
}
// Decode one contiguous range of `blocks` output blocks of the inverse BWT.
// Each block is r bytes long except the last, which holds `reminder` bytes.
// Blocks are decoded eight at a time for instruction-level parallelism; the
// remaining 1..8 blocks are decoded in two phases: first all of them for
// reminder >> 1 units (completing the short last block), then the other
// blocks for the rest of their r >> 1 units.
static void libsais_unbwt_decode(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_sint_t blocks, fast_uint_t reminder)
{
// Recompute the fastbits index shift (same formula as table construction).
fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; }
fast_uint_t offset = 0;
// Full groups of eight r-byte blocks.
while (blocks > 8)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7];
libsais_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, (fast_uint_t)r >> 1);
I += 8; blocks -= 8; offset += 8 * (fast_uint_t)r;
}
// Tail of 1..8 blocks; the last one is only `reminder` bytes long.
if (blocks == 1)
{
fast_uint_t i0 = I[0];
libsais_unbwt_decode_1(U + offset, P, bucket2, fastbits, shift, &i0, reminder >> 1);
}
else if (blocks == 2)
{
fast_uint_t i0 = I[0], i1 = I[1];
// Phase 1: both blocks for reminder >> 1 units (finishes the last block).
libsais_unbwt_decode_2(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, reminder >> 1);
// Phase 2: only the first block, for its remaining units.
libsais_unbwt_decode_1(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, &i0, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
else if (blocks == 3)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2];
libsais_unbwt_decode_3(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, reminder >> 1);
libsais_unbwt_decode_2(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
else if (blocks == 4)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3];
libsais_unbwt_decode_4(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, reminder >> 1);
libsais_unbwt_decode_3(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
else if (blocks == 5)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4];
libsais_unbwt_decode_5(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, reminder >> 1);
libsais_unbwt_decode_4(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
else if (blocks == 6)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5];
libsais_unbwt_decode_6(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, reminder >> 1);
libsais_unbwt_decode_5(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
else if (blocks == 7)
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6];
libsais_unbwt_decode_7(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, reminder >> 1);
libsais_unbwt_decode_6(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
else
{
fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7];
libsais_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, reminder >> 1);
libsais_unbwt_decode_7(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, ((fast_uint_t)r >> 1) - (reminder >> 1));
}
}
// Split the `blocks` output blocks across OpenMP threads and decode them.
// Each thread gets a contiguous run of blocks; only the thread owning the
// final block passes the short `reminder` length, all others pass r.
// The very last output byte is written afterwards from T[0] (it is not
// produced by the block decoders).
static void libsais_unbwt_decode_omp(const uint8_t * RESTRICT T, uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_sint_t threads)
{
fast_uint_t lastc = T[0];
// Number of r-sized blocks (the last may be partial) and the length of
// the final, possibly short, block.
fast_sint_t blocks = 1 + (((fast_sint_t)n - 1) / (fast_sint_t)r);
fast_uint_t reminder = (fast_uint_t)n - ((fast_uint_t)r * ((fast_uint_t)blocks - 1));
#if defined(_OPENMP)
fast_sint_t max_threads = blocks < threads ? blocks : threads;
#pragma omp parallel num_threads(max_threads) if(max_threads > 1 && n >= 65536)
#endif
{
#if defined(_OPENMP)
fast_sint_t omp_thread_num = omp_get_thread_num();
fast_sint_t omp_num_threads = omp_get_num_threads();
#else
UNUSED(threads);
fast_sint_t omp_thread_num = 0;
fast_sint_t omp_num_threads = 1;
#endif
// Distribute blocks as evenly as possible; the first `blocks % threads`
// threads receive one extra block.
fast_sint_t omp_block_stride = blocks / omp_num_threads;
fast_sint_t omp_block_reminder = blocks % omp_num_threads;
fast_sint_t omp_block_size = omp_block_stride + (omp_thread_num < omp_block_reminder);
fast_sint_t omp_block_start = omp_block_stride * omp_thread_num + (omp_thread_num < omp_block_reminder ? omp_thread_num : omp_block_reminder);
libsais_unbwt_decode(U + r * omp_block_start, P, n, r, I + omp_block_start, bucket2, fastbits, omp_block_size, omp_thread_num < omp_num_threads - 1 ? (fast_uint_t)r : reminder);
}
U[n - 1] = (uint8_t)lastc;
}
// Core inverse-BWT driver: build the LF-mapping tables (in parallel only
// when it is worthwhile), then decode.  Returns 0; error checking is done
// by the callers that allocate the working storage.
static sa_sint_t libsais_unbwt_core(const uint8_t * RESTRICT T, uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads)
{
#if defined(_OPENMP)
// Parallel initialization needs the per-thread `buckets` scratch and only
// pays off for sufficiently large inputs.
if (threads > 1 && n >= 262144)
{
libsais_unbwt_init_parallel(T, P, n, I, bucket2, fastbits, buckets, threads);
}
else
#else
UNUSED(buckets);
#endif
{
libsais_unbwt_init_single(T, P, n, I, bucket2, fastbits);
}
libsais_unbwt_decode_omp(T, U, P, n, r, I, bucket2, fastbits, threads);
return 0;
}
// Allocate the working storage for one inverse-BWT run, invoke the core
// routine, and release the storage.  Returns the core's result, or -2 when
// any required allocation failed.
static sa_sint_t libsais_unbwt_main(const uint8_t * T, uint8_t * U, sa_uint_t * P, sa_sint_t n, sa_sint_t r, const sa_uint_t * I, sa_sint_t threads)
{
    // Shift so that (n >> shift) fits the fastbits table size.
    fast_uint_t shift = 0;
    while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift += 1; }

    // Symbol-pair buckets, the fastbits lookup table, and (multi-threaded
    // runs only) per-thread bucket scratch space.
    sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais_alloc_aligned(ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t), 4096);
    uint16_t * RESTRICT fastbits = (uint16_t *)libsais_alloc_aligned(((size_t)1 + (size_t)(n >> shift)) * sizeof(uint16_t), 4096);
    sa_uint_t * RESTRICT buckets = NULL;
    if (threads > 1)
    {
        buckets = (sa_uint_t *)libsais_alloc_aligned((size_t)threads * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) * sizeof(sa_uint_t), 4096);
    }

    sa_sint_t index = -2;
    if (bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1))
    {
        index = libsais_unbwt_core(T, U, P, n, r, I, bucket2, fastbits, buckets, threads);
    }

    // libsais_free_aligned is presumably NULL-tolerant (buckets may be NULL
    // in the single-threaded path) -- consistent with the original code.
    libsais_free_aligned(buckets);
    libsais_free_aligned(fastbits);
    libsais_free_aligned(bucket2);
    return index;
}
// Context-based variant of libsais_unbwt_main: the working tables live in
// ctx instead of being allocated per call.  Returns -2 when the context is
// missing or incomplete for its configured thread count.
static sa_sint_t libsais_unbwt_main_ctx(const LIBSAIS_UNBWT_CONTEXT * ctx, const uint8_t * T, uint8_t * U, sa_uint_t * P, sa_sint_t n, sa_sint_t r, const sa_uint_t * I)
{
    if (ctx == NULL || ctx->bucket2 == NULL || ctx->fastbits == NULL)
    {
        return -2;
    }
    // Per-thread bucket scratch is mandatory for multi-threaded contexts.
    if (ctx->buckets == NULL && ctx->threads != 1)
    {
        return -2;
    }
    return libsais_unbwt_core(T, U, P, n, r, I, ctx->bucket2, ctx->fastbits, ctx->buckets, (sa_sint_t)ctx->threads);
}
// Create a single-threaded inverse-BWT context; returned as an opaque
// pointer so callers do not depend on the context layout.
void * libsais_unbwt_create_ctx(void)
{
    void * ctx = (void *)libsais_unbwt_create_ctx_main(1);
    return ctx;
}
void libsais_unbwt_free_ctx(void * ctx)
{
libsais_unbwt_free_ctx_main((LIBSAIS_UNBWT_CONTEXT *)ctx);
}
// Plain inverse BWT with a single primary index: equivalent to the
// auxiliary-indexes variant with sampling rate r == n.
int32_t libsais_unbwt(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t i)
{
    int32_t primary_index = i;
    return libsais_unbwt_aux(T, U, A, n, n, &primary_index);
}
// Context-based plain inverse BWT: single primary index, r == n.
int32_t libsais_unbwt_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t i)
{
    int32_t primary_index = i;
    return libsais_unbwt_aux_ctx(ctx, T, U, A, n, n, &primary_index);
}
// Inverse BWT with auxiliary indexes I sampled every r positions.
// Returns 0 on success, -1 on invalid arguments, or the result of
// libsais_unbwt_main (-2 on allocation failure).
int32_t libsais_unbwt_aux(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t r, const int32_t * I)
{
    // r must equal n, or be a power of two not smaller than 2.
    int invalid_rate = (r != n) && ((r < 2) || ((r & (r - 1)) != 0));
    if (T == NULL || U == NULL || A == NULL || n < 0 || invalid_rate || I == NULL)
    {
        return -1;
    }
    if (n <= 1)
    {
        // Trivial input: the primary index must still be consistent.
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }
    // Every auxiliary index must lie in the range (0, n].
    fast_sint_t block;
    for (block = 0; block <= (n - 1) / r; ++block)
    {
        if (I[block] <= 0 || I[block] > n) { return -1; }
    }
    return libsais_unbwt_main(T, U, (sa_uint_t *)A, n, r, (const sa_uint_t *)I, 1);
}
// Context-based inverse BWT with auxiliary indexes; same validation as
// libsais_unbwt_aux, but the working storage comes from ctx.
int32_t libsais_unbwt_aux_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t r, const int32_t * I)
{
    // r must equal n, or be a power of two not smaller than 2.
    int invalid_rate = (r != n) && ((r < 2) || ((r & (r - 1)) != 0));
    if (T == NULL || U == NULL || A == NULL || n < 0 || invalid_rate || I == NULL)
    {
        return -1;
    }
    if (n <= 1)
    {
        // Trivial input: the primary index must still be consistent.
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }
    // Every auxiliary index must lie in the range (0, n].
    fast_sint_t block;
    for (block = 0; block <= (n - 1) / r; ++block)
    {
        if (I[block] <= 0 || I[block] > n) { return -1; }
    }
    return libsais_unbwt_main_ctx((const LIBSAIS_UNBWT_CONTEXT *)ctx, T, U, (sa_uint_t *)A, n, r, (const sa_uint_t *)I);
}
#if defined(_OPENMP)
// Create an inverse-BWT context for OpenMP use.  A negative thread count is
// rejected; zero selects the OpenMP default thread count.
void * libsais_unbwt_create_ctx_omp(int32_t threads)
{
    if (threads < 0) { return NULL; }
    int32_t effective_threads = (threads == 0) ? omp_get_max_threads() : threads;
    return (void *)libsais_unbwt_create_ctx_main(effective_threads);
}
// OpenMP plain inverse BWT: single primary index, r == n.
int32_t libsais_unbwt_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t i, int32_t threads)
{
    int32_t primary_index = i;
    return libsais_unbwt_aux_omp(T, U, A, n, n, &primary_index, threads);
}
// OpenMP inverse BWT with auxiliary indexes.  threads == 0 selects the
// OpenMP default thread count; negative thread counts are invalid.
int32_t libsais_unbwt_aux_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t r, const int32_t * I, int32_t threads)
{
    // r must equal n, or be a power of two not smaller than 2.
    int invalid_rate = (r != n) && ((r < 2) || ((r & (r - 1)) != 0));
    if (T == NULL || U == NULL || A == NULL || n < 0 || invalid_rate || I == NULL || threads < 0)
    {
        return -1;
    }
    if (n <= 1)
    {
        // Trivial input: the primary index must still be consistent.
        if (I[0] != n) { return -1; }
        if (n == 1) { U[0] = T[0]; }
        return 0;
    }
    // Every auxiliary index must lie in the range (0, n].
    fast_sint_t block;
    for (block = 0; block <= (n - 1) / r; ++block)
    {
        if (I[block] <= 0 || I[block] > n) { return -1; }
    }
    int32_t effective_threads = (threads == 0) ? omp_get_max_threads() : threads;
    return libsais_unbwt_main(T, U, (sa_uint_t *)A, n, r, (const sa_uint_t *)I, effective_threads);
}
#endif
|
prior_box_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <cmath>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/transform.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
constexpr int kPriorBoxFLOAT = 1;
constexpr int kPriorBoxDOUBLE = 2;
// Expand the raw aspect-ratio list into the effective list used for prior
// boxes: ratio 1.0 is always present first, near-duplicates (within epsilon)
// are dropped, and when `flip` is set each kept ratio also contributes its
// reciprocal.  Output order matches insertion order, so callers see a
// deterministic layout.
inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior,
                               bool flip,
                               std::vector<float>* output_aspect_ratior) {
  constexpr float epsilon = 1e-6;
  output_aspect_ratior->clear();
  output_aspect_ratior->push_back(1.0f);
  for (float ar : input_aspect_ratior) {
    // Skip ratios already present (within tolerance); this also dedups
    // against reciprocals added by earlier `flip` insertions.
    // Use std::fabs (via <cmath>) rather than relying on the C `fabs`
    // being pulled in transitively by framework headers.
    const bool already_exist = std::any_of(
        output_aspect_ratior->begin(), output_aspect_ratior->end(),
        [ar](float existing) { return std::fabs(ar - existing) < epsilon; });
    if (!already_exist) {
      output_aspect_ratior->push_back(ar);
      if (flip) {
        output_aspect_ratior->push_back(1.0f / ar);
      }
    }
  }
}
// CPU kernel that generates SSD-style prior (anchor) boxes for each cell of
// an input feature map, together with the per-box variances used when
// encoding regression targets.  T is the registered element type; K is the
// type used for the arithmetic and for the output tensors.
// NOTE(review): Input/Image are indexed as dims()[2]=height, dims()[3]=width,
// i.e. NCHW layout is assumed -- confirm against the op's InferShape.
template <typename T, typename K>
class PriorBoxOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// Inputs: feature map and source image; outputs: boxes and variances.
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto flip = ctx.Attr<bool>("flip");
auto clip = ctx.Attr<bool>("clip");
// Controls whether the min/max-size boxes are emitted before the
// aspect-ratio boxes (Caffe ordering) or after.
auto min_max_aspect_ratios_order =
ctx.Attr<bool>("min_max_aspect_ratios_order");
std::vector<float> aspect_ratios;
ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios);
K step_w = static_cast<K>(ctx.Attr<float>("step_w"));
K step_h = static_cast<K>(ctx.Attr<float>("step_h"));
K offset = static_cast<K>(ctx.Attr<float>("offset"));
auto img_width = image->dims()[3];
auto img_height = image->dims()[2];
auto feature_width = input->dims()[3];
auto feature_height = input->dims()[2];
// Step of 0 means "derive the stride from image/feature sizes".
K step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<K>(img_width) / feature_width;
step_height = static_cast<K>(img_height) / feature_height;
} else {
step_width = step_w;
step_height = step_h;
}
// Boxes per feature-map cell: one per (aspect ratio, min size) pair,
// plus one extra square box per max size.
int num_priors = aspect_ratios.size() * min_sizes.size();
if (max_sizes.size() > 0) {
num_priors += max_sizes.size();
}
boxes->mutable_data<K>(ctx.GetPlace());
vars->mutable_data<K>(ctx.GetPlace());
// b_t is bumped by 4 after each box; write order must match num_priors.
K* b_t = boxes->data<K>();
for (int h = 0; h < feature_height; ++h) {
for (int w = 0; w < feature_width; ++w) {
// Box center for this cell, in image pixel coordinates.
K center_x = (w + offset) * step_width;
K center_y = (h + offset) * step_height;
K box_width, box_height;
for (size_t s = 0; s < min_sizes.size(); ++s) {
auto min_size = min_sizes[s];
if (min_max_aspect_ratios_order) {
// Order: min-size square, max-size square, then aspect ratios.
box_width = box_height = min_size / 2.;
// Coordinates are normalized to [0, 1] by the image size.
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
if (max_sizes.size() > 0) {
auto max_size = max_sizes[s];
// square prior with size sqrt(minSize * maxSize)
box_width = box_height = sqrt(min_size * max_size) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
// priors with different aspect ratios
for (size_t r = 0; r < aspect_ratios.size(); ++r) {
float ar = aspect_ratios[r];
// Ratio 1.0 was already covered by the min-size square above.
if (fabs(ar - 1.) < 1e-6) {
continue;
}
box_width = min_size * sqrt(ar) / 2.;
box_height = min_size / sqrt(ar) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
} else {
// Order: aspect-ratio boxes first (ratio 1.0 included), then the
// max-size square.
// priors with different aspect ratios
for (size_t r = 0; r < aspect_ratios.size(); ++r) {
float ar = aspect_ratios[r];
box_width = min_size * sqrt(ar) / 2.;
box_height = min_size / sqrt(ar) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
if (max_sizes.size() > 0) {
auto max_size = max_sizes[s];
// square prior with size sqrt(minSize * maxSize)
box_width = box_height = sqrt(min_size * max_size) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
}
}
}
}
// Optionally clamp all coordinates into [0, 1].
if (clip) {
K* dt = boxes->data<K>();
std::transform(dt, dt + boxes->numel(), dt, [](K v) -> K {
return std::min<K>(std::max<K>(v, 0.), 1.);
});
}
// Broadcast the variance vector to every box: stage it in a 1-row
// tensor, then copy row-wise into the (box_num x variances) view.
framework::Tensor var_t;
var_t.mutable_data<K>(
phi::make_ddim({1, static_cast<int>(variances.size())}),
ctx.GetPlace());
auto var_et = framework::EigenTensor<K, 2>::From(var_t);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (size_t i = 0; i < variances.size(); ++i) {
var_et(0, i) = variances[i];
}
int box_num = feature_height * feature_width * num_priors;
// Temporarily flatten vars to 2-D for the fill, then restore its shape.
auto var_dim = vars->dims();
vars->Resize({box_num, static_cast<int>(variances.size())});
auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
for (int i = 0; i < box_num; ++i) {
for (size_t j = 0; j < variances.size(); ++j) {
e_vars(i, j) = variances[j];
}
}
vars->Resize(var_dim);
}
};  // class PriorBoxOpKernel
} // namespace operators
} // namespace paddle
|
GB_binop__islt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__islt_uint8
// A.*B function (eWiseMult): GB_AemultB__islt_uint8
// A*D function (colscale): GB_AxD__islt_uint8
// D*A function (rowscale): GB_DxB__islt_uint8
// C+=B function (dense accum): GB_Cdense_accumB__islt_uint8
// C+=b function (dense accum): GB_Cdense_accumb__islt_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_uint8
// C=scalar+B GB_bind1st__islt_uint8
// C=scalar+B' GB_bind1st_tran__islt_uint8
// C=A+scalar GB_bind2nd__islt_uint8
// C=A'+scalar GB_bind2nd_tran__islt_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij < bij)
// Type and operator glue macros for the ISLT_UINT8 kernels.  These are
// consumed by the shared template files #included by the functions below.
// (Auto-generated; kept in sync with the generator, do not hand-edit.)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" is a placeholder -- presumably never expanded for ISLT_UINT8;
// confirm the templates guard on it.)
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Compiled out (#if 0): per the file header, the dense-accum ewise3 kernel
// is "(none)" for this operator, since ISLT is not in the accumulator set
// listed below.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The loop body comes entirely from the shared template; GB_DISABLE compiles
// this kernel down to a GrB_NO_VALUE stub when the operator/type combination
// is disabled at configure time.
GrB_Info GB_Cdense_ewise3_noaccum__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The slice arrays describe how B's entries are partitioned into `ntasks`
// parallel tasks; the actual loop lives in the included template.
GrB_Info GB_Cdense_accumB__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__islt_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the brace-scoped return above always fires.
// Harmless artifact of the code generator; fix belongs in the generator,
// not in this auto-generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Writes directly into C->x; the slice arrays partition A's entries into
// `ntasks` parallel tasks for the included template.
GrB_Info GB_AxD__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the per-matrix slice workspaces; the template may
// allocate them and also invokes this macro on its own error paths.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__islt_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Slice workspaces start NULL so GB_FREE_ALL is always safe to call.
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Same structure as GB_AaddB above, but over the intersection pattern via
// the emult template.
GrB_Info GB_AemultB__islt_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Slice workspaces start NULL so GB_FREE_ALL is always safe to call.
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x < Bx [p]) for every present entry; Bb is the bitmap
// of present entries (GBB treats a NULL bitmap as all-present).
GrB_Info GB_bind1st__islt_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] < y) for every present entry; Ab is the bitmap
// of present entries (GBB treats a NULL bitmap as all-present).
GrB_Info GB_bind2nd__islt_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB_bind1st_tran__islt_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code generated after this function.
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB_bind2nd_tran__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__iseq_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_fp64
// A.*B function (eWiseMult): GB_AemultB__iseq_fp64
// A*D function (colscale): GB_AxD__iseq_fp64
// D*A function (rowscale): GB_DxB__iseq_fp64
// C+=B function (dense accum): GB_Cdense_accumB__iseq_fp64
// C+=b function (dense accum): GB_Cdense_accumb__iseq_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_fp64
// C=scalar+B GB_bind1st__iseq_fp64
// C=scalar+B' GB_bind1st_tran__iseq_fp64
// C=A+scalar GB_bind2nd__iseq_fp64
// C=A'+scalar GB_bind2nd_tran__iseq_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij == bij)
// Type and operator glue macros for the ISEQ_FP64 kernels, consumed by the
// shared template files #included by the functions below.  The ISEQ result
// (0 or 1 from the == comparison) is stored as a double, matching GB_CTYPE.
// (Auto-generated; kept in sync with the generator, do not hand-edit.)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" is a placeholder -- presumably never expanded for ISEQ_FP64;
// confirm the templates guard on it.)
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FP64 || GxB_NO_ISEQ_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: the fused dense C += A+B kernel is only generated for the
// operators listed below; ISEQ does not qualify, hence the name "(none)" and
// the #if 0 guard.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Computes cij = (aij == bij) for every entry; the loop itself lives in the
// included template, specialized via the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The kfirst/klast/pstart slice arrays partition the entries of B into
// ntasks units of parallel work (see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// cij = (cij == bij) for each entry present in B
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Applies cij = (cij == b) to every entry of the dense matrix C.
// Fix: the generated code contained a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block (which already returns
// unconditionally); the dead statement has been removed.
GrB_Info GB_Cdense_accumb__iseq_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the computed values directly into C->x
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the computed values directly into C->x
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// The C_to_M/C_to_A/C_to_B arrays map vectors of C to the corresponding
// vectors of M, A, and B; TaskList partitions the work into ntasks tasks.
GrB_Info GB_AaddB__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Same task/mapping scheme as GB_AaddB above, but the pattern of C is the
// set intersection of A and B (handled by the included template).
GrB_Info GB_AemultB__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x == Bx [p]) for all p, in parallel.
GrB_Info GB_bind1st__iseq_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// view the untyped arguments as double data
double *Cx = (double *) Cx_output ;
const double xscalar = (*((const double *) x_input)) ;
const double *Bx = (const double *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cij = (x == bij)
Cx [k] = (xscalar == Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] == y) for all p, in parallel.
GrB_Info GB_bind2nd__iseq_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// view the untyped arguments as double data
double *Cx = (double *) Cx_output ;
const double *Ax = (const double *) Ax_input ;
const double yscalar = (*((const double *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cij = (aij == y)
Cx [k] = (Ax [k] == yscalar) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__iseq_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the scalar x is captured here and used by GB_CAST_OP inside the template
double x = (*((const double *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (same type here)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__iseq_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the scalar y is captured here and used by GB_CAST_OP inside the template
double y = (*((const double *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_uint32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint32_int64
// op(A') function: GB_tran__identity_uint32_int64
// C type: uint32_t
// A type: int64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
// input (A) entry type
#define GB_ATYPE \
int64_t
// output (C) entry type
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// accessor for the p-th entry of the C output array
#define GB_CX(p) Cx [p]
// unary operator (IDENTITY: just pass the value through)
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Casts each int64_t entry of Ax to uint32_t and stores it in Cx.
GrB_Info GB_unop__identity_uint32_int64
(
uint32_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = (uint32_t) Ax [p]
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the transpose template applies GB_CAST_OP to each entry of A'
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
#ifndef included_PhysicalConstants
#define included_PhysicalConstants
namespace Thermo4PFM
{
#ifdef HAVE_OPENMP_OFFLOAD
// make the constant available in OpenMP target (device) code
#pragma omp declare target
#endif
// universal (molar) gas constant R, in J K-1 mol-1
const double gas_constant_R_JpKpmol = 8.314472; // J K-1 mol-1
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
/* True when the given offset is the last unit of a progress tick. */
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
/* True when the process has exhausted its file-descriptor resource limit. */
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
/* Result pair of a virtual-pixel modulo computation. */
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
/*
Forward declarations get C linkage when compiled by a C++ compiler.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
/* Tracks whether the cache machinery is instantiated
(cleared in CacheComponentTerminus). */
static volatile MagickBooleanType
instantiate_cache = MagickFalse;
/* Serializes access to the global cache state. */
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
/* NOTE(review): -1 appears to mean "not yet determined"; set elsewhere. */
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
/* allocate at least as many nexuses as the OpenMP maximum and the
thread resource limit allow, and never zero */
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* environment variable first, then the policy value overrides it */
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
register ssize_t
i;
/* an aligned array of pointers plus one contiguous block of nexuses */
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
sizeof(**nexus_info));
if (nexus_info[0] == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
/* each slot points into the contiguous block allocated above */
nexus_info[i]=(&nexus_info[0][i]);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate const void *AcquirePixelCachePixels(const Image *image,
MagickSizeType *length,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*length=0;
/* direct access only for in-memory or memory-mapped caches */
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((const void *) NULL);
*length=cache_info->length;
return((const void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
/* create the global cache semaphore if it does not exist yet */
if (cache_semaphore == (SemaphoreInfo *) NULL)
cache_semaphore=AcquireSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
/* ensure the semaphore exists before locking it */
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
LockSemaphoreInfo(cache_semaphore);
instantiate_cache=MagickFalse;
UnlockSemaphoreInfo(cache_semaphore);
/* release the semaphore itself last */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*magick_restrict clone_info;
const CacheInfo
*magick_restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
/* only the thread count and virtual pixel method are cloned here;
pixel data is copied separately (see ClonePixelCacheRepository) */
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
if (clone_info == (Cache) NULL)
return((Cache) NULL);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict source_info;
assert(clone != (Cache) NULL);
/* NOTE(review): the local names are counter-intuitive — "source_info"
aliases the destination (clone) and "cache_info" aliases the source
(cache); the final assignment copies methods from cache to clone */
source_info=(CacheInfo *) clone;
assert(source_info->signature == MagickCoreSignature);
if (source_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source_info->filename);
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
MagickSizeType
extent;
size_t
quantum;
ssize_t
count;
struct stat
file_stats;
unsigned char
*buffer;
/*
Clone pixel cache on disk with identical morphology.
*/
if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
(OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
return(MagickFalse);
/* size the copy buffer from the source file, capped at the maximum */
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* stream the source cache file into the clone cache file */
extent=0;
while ((count=read(cache_info->file,buffer,quantum)) > 0)
{
ssize_t
number_bytes;
number_bytes=write(clone_info->file,buffer,(size_t) count);
if (number_bytes != count)
break;
extent+=number_bytes;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
/* a short copy means a read or write failure */
if (extent != cache_info->length)
return(MagickFalse);
return(MagickTrue);
}
/* Clones the pixel (and metacontent) data of cache_info into clone_info,
handling identical and mismatched cache morphologies.
Fix: four occurrences of the mojibake "(R)ion" (an HTML-entity corruption
of "&region") have been restored to "&region" so the code compiles. */
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination) \
num_threads(((source)->type == DiskCache) || \
((destination)->type == DiskCache) || (((source)->rows) < \
(16*GetMagickResourceLimit(ThreadResource))) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
/* a ping cache carries no pixel data, nothing to clone */
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) ||
(clone_info->type == MapCache)))
{
/* both caches are addressable: a straight memcpy suffices */
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->columns*cache_info->number_channels*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* the fast path below requires identical channel maps */
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->columns*cache_info->number_channels,
clone_info->columns*clone_info->number_channels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
/* read one row of the source cache into this thread's nexus */
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
register ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
/* remap each destination channel from the source channel map */
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Release the pixel cache attached to the image, if one exists.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Deallocate the memory associated with the image pixel cache, delegating
  to an installed destroy handler when one is registered.
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    image->cache=DestroyPixelCache(image->cache);
  else
    cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  Close the disk-backed cache file (if open) and release its file resource.
  Returns MagickTrue only when a file was open and close() succeeded.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result = -1;

  if (cache_info->file != -1)
    {
      result=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  if (result == -1)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Release the pixel store owned by the cache — heap memory, memory-mapped
  file, on-disk file, or distributed server — according to the cache type,
  then reset the type/mapped/metacontent bookkeeping.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /*
            Pixels are owned by the OpenCL cache info; relinquish it and
            drop our alias to the buffer.
          */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* mapped==MagickFalse means the pixels came from aligned malloc */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      /* read-only caches may share the file; only writers unlink it */
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /*
      No break above: a map-backed cache falls through into DiskCache so
      the underlying disk file is also closed and its resources released.
    */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
/*
  Drop one reference to the pixel cache; when the final reference is
  released, relinquish the pixel store and all auxiliary structures.
  Always returns (Cache) NULL.
*/
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Decrement the reference count under the cache semaphore; if other
    references remain, we are done.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Last reference: free the pixels, then the distributed-cache server
    info, per-thread nexus array, random state, and both semaphores, in
    that order.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* poison the signature so stale pointers fail the signature asserts */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Free the staging buffer owned by this nexus and clear its bookkeeping.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
/*
  Destroy the per-thread pixel cache nexus array.  Note only element 0 of
  the NexusInfo structs is freed — they appear to share a single
  allocation anchored at nexus_info[0] — while the pointer array itself
  was aligned-allocated.  Always returns NULL.
*/
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* free any staged pixels, then poison the signature against reuse */
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the authentic metacontent from the last pixel request, honoring
  any installed metacontent handler; otherwise read this thread's nexus.
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the metacontent staged by this thread's cache nexus from the
  last queue/get request.
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return an OpenCL buffer wrapping the image's pixel cache for use on the
  given device, or (cl_mem) NULL when the cache cannot be wrapped.  Fix:
  removed the unused local `cl_int status`, which drew an unused-variable
  warning and served no purpose in this function.
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info->type == UndefinedCache)
    SyncImagePixelCache((Image *) image,exception);
  /*
    Only an unmapped in-memory cache can back an OpenCL buffer.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  /*
    If an OpenCL buffer exists but belongs to a different device context,
    copy it so the returned buffer matches the requested device.
  */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
      if (cache_info->opencl == (MagickCLCacheInfo) NULL)
        return((cl_mem) NULL);
    }
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Stage the requested region in the nexus, then populate it from the
  backing store unless the nexus already aliases authentic pixels.
  Returns the staged pixels, or NULL on failure.
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the pixels staged by this thread's cache nexus from the last
  queue/get request.
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the authentic pixels from the last queue/get request, honoring
  any installed cache method handler; otherwise read this thread's nexus.
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Obtain a pixel region for read/write access, delegating to an installed
  cache method handler when one is registered; otherwise fetch through
  this thread's cache nexus.  Returns NULL when the region is unavailable.
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Fetch an authentic pixel region through this thread's cache nexus.
  Returns NULL if the cache is missing or the transfer fails.
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report the extent of the pixels staged by this thread's cache nexus
  (from the last queue/get request).
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Confirm the image still matches the pixel cache morphology: class,
  colorspace, masks, geometry, channel map, and metacontent extent must
  all agree, and the nexus array must exist.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait))
    return(MagickFalse);
  if ((image->read_mask != cache_info->read_mask) ||
      (image->write_mask != cache_info->write_mask))
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels))
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (memcmp(p,q,image->number_channels*sizeof(*p)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Ensure there is only a single writable reference to the pixel cache,
  cloning it copy-on-write style when it is shared or read-only, and
  verify the image matches the cache morphology.  Returns the (possibly
  new) cache, or (Cache) NULL on failure.
*/
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* shared across all calls; first caller initializes the limits */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /* honor the throttle resource: sleep briefly every 32nd call */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  /* cache_epoch is a file-scope static declared outside this excerpt */
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Check-lock-recheck: the shared/read-only test is repeated under the
    cache semaphore before committing to a clone.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              /* copy the pixels too only when the caller asked for it */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  if (cache_info->reference_count == 1)
                    cache_info->nexus_info=(NexusInfo **) NULL;
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* drop our reference to the old cache outside its semaphore */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Report the backing-store type of the image pixel cache.
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Copy one pixel into destination, remapping channel order; when source
  is unavailable, substitute the image background color and return
  MagickFalse.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  if (source != (const Quantum *) NULL)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        destination[channel]=source[i];
      }
      return(MagickTrue);
    }
  destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
  destination[GreenPixelChannel]=ClampToQuantum(image->background_color.green);
  destination[BluePixelChannel]=ClampToQuantum(image->background_color.blue);
  destination[BlackPixelChannel]=ClampToQuantum(image->background_color.black);
  destination[AlphaPixelChannel]=ClampToQuantum(image->background_color.alpha);
  return(MagickFalse);
}
/*
  Return a single authentic pixel at (x,y); delegate to an installed
  handler when one is registered, otherwise read through the cache.  The
  background color is copied into pixel when the read fails.
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler ==
      (GetOneAuthenticPixelFromHandler) NULL)
    {
      q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
      return(CopyPixel(image,q,pixel));
    }
  return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
    pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read one authentic pixel at (x,y) through this thread's cache nexus;
  pixel receives the background color when the read fails.
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  const int
    id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return one virtual pixel at (x,y); delegate to an installed handler
  when one is registered, otherwise resolve through this thread's nexus
  using the image's current virtual pixel method.
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler ==
      (GetOneVirtualPixelFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
        1UL,1UL,cache_info->nexus_info[id],exception);
      return(CopyPixel(image,p,pixel));
    }
  return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
    GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *q;

  /*
    Default one-virtual-pixel handler: read a 1x1 region through this
    thread's cache nexus and copy it into the caller's buffer.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict q;

  /*
    Return a single virtual pixel at (x,y) in PixelInfo form; MagickFalse
    on failure to acquire the pixel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  GetPixelInfo(image,pixel);
  q=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (q == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,q,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the colorspace recorded in the pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Initialize the CacheMethods structure with the default pixel-cache
    handler implementations.  Uses memset for consistency with the rest of
    this module (the legacy ResetMagickMemory wrapper is equivalent).
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;

  MagickSizeType
    area;

  /*
    Extent (in pixels) of the nexus region; when the nexus region is empty,
    fall back to the full cache extent.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  area=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (area != 0)
    return(area);
  return((MagickSizeType) info->columns*info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  /*
    Expose the raw pixel store for memory- or map-backed caches along with
    its byte length; other cache types yield NULL with *length == 0.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=0;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    {
      *length=info->length;
      return((void *) info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the storage class (DirectClass or PseudoClass) of the cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*width=2048UL/(cache_info->number_channels*sizeof(Quantum));
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/(cache_info->number_channels*sizeof(Quantum));
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the active virtual-pixel policy for accesses outside the cache
    boundaries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% const void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;

  /*
    Metacontent for the given nexus; NULL when the cache has not been
    initialized yet.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelsFromNexus() method is:
%
% Quantum *GetVirtualPixelsFromNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offsets used to perturb out-of-bounds coordinates for
  DitherVirtualPixelMethod.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    position;

  /*
    Perturb x by a centered dither offset, then clamp to [0,columns-1].
  */
  position=x+DitherMatrix[x & 0x07]-32L;
  if (position < 0L)
    return(0L);
  return(position >= (ssize_t) columns ? (ssize_t) columns-1L : position);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    position;

  /*
    Perturb y by a centered dither offset, then clamp to [0,rows-1].
  */
  position=y+DitherMatrix[y & 0x07]-32L;
  if (position < 0L)
    return(0L);
  return(position >= (ssize_t) rows ? (ssize_t) rows-1L : position);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Replicate-edge policy: clamp x into the valid column range
    [0,columns-1].
  */
  if (x < 0L)
    return(0L);
  return(x >= (ssize_t) columns ? (ssize_t) (columns-1) : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Replicate-edge policy: clamp y into the valid row range [0,rows-1].
  */
  if (y < 0L)
    return(0L);
  return(y >= (ssize_t) rows ? (ssize_t) (rows-1) : y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  /*
    Random column index; assumes GetPseudoRandomValue() yields a value in
    [0,1) -- TODO confirm against MagickCore/random.c.
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  /*
    Random row index; assumes GetPseudoRandomValue() yields a value in
    [0,1) -- TODO confirm against MagickCore/random.c.
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Compute the remainder of dividing offset by extent.  It returns not only
    the quotient (tile the offset falls in) but also the positive remainder
    within that tile such that 0 <= remainder < extent.  This is a floored
    modulo division rather than C's default truncated division.  Note: the
    quotient is only stepped down when the truncated remainder is non-zero;
    unconditionally decrementing for negative offsets (as the previous code
    did) yields remainder == extent for exact negative multiples, an
    out-of-range tile coordinate.
  */
  modulo.quotient=offset/(ssize_t) extent;
  modulo.remainder=offset % (ssize_t) extent;
  if (modulo.remainder < 0)
    {
      modulo.quotient--;
      modulo.remainder+=(ssize_t) extent;
    }
  return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  RectangleInfo
    region;

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info,
    exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  s=(unsigned char *) nexus_info->metacontent;
  virtual_nexus=AcquirePixelCacheNexus(1);
  if (virtual_nexus == (NexusInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return((const Quantum *) NULL);
    }
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Fill the constant virtual pixel used for out-of-bounds accesses.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
            sizeof(*p));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p));
      q+=length*cache_info->number_channels;
      if ((s != (void *) NULL) && (r != (const void *) NULL))
        {
          /*
            Copy length pixels' worth of metacontent (metacontent_extent
            bytes per pixel) -- the previous code copied only length bytes
            while still advancing s by length*metacontent_extent.
          */
          (void) memcpy(s,r,(size_t) length*cache_info->metacontent_extent);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Default get-virtual-pixels handler: read the region through this
    thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region using the cache's current virtual
    pixel policy; delegates to the installed handler when present.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(thread_id < (int) info->number_threads);
  return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t V i r t u a l P i x e l s C a c h e                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the read-only pixels held by the calling thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the read-only pixels associated with the specified cache nexus.
    A cache whose storage class is still undefined has no pixels to expose.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#if defined(SIGBUS)
/*
  SIGBUS handler installed while a disk-backed pixel cache is memory-mapped
  (see SetPixelCacheExtent below): a bus error here means the mapped cache
  file could not be extended (e.g. the disk filled up), so raise a fatal
  cache exception rather than crashing.
*/
static void CacheSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache");
}
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Open (or reuse) the on-disk backing file for the pixel cache in the
  requested mode.  Returns MagickTrue on success; on success the open file
  descriptor is stored in cache_info->file and one FileResource is consumed.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try exclusive create first; fall back to an existing file */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* read/write: exclusive create first, then an existing file */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);  /* release stale descriptor */
  cache_info->file=file;
  return(MagickTrue);
}
/*
  Write `length` bytes from `buffer` to the cache file at `offset`,
  restarting short and EINTR-interrupted writes.  Returns the number of
  bytes actually written; a value less than `length` (or -1) indicates a
  hard error.  Uses pwrite() when available so concurrent callers do not
  race on the shared file position.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: return the short byte count */
      }
  }
  return(i);
}
/*
  Extend the disk-backed pixel cache file to at least `length` bytes by
  writing one byte at the new end-of-file (yielding a sparse file on
  platforms that support it), optionally pre-allocating real blocks with
  posix_fallocate(), and installing the SIGBUS handler that traps failed
  memory-mapped extensions.  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
      (void) signal(SIGBUS,CacheSignalHandler);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Allocate the pixel cache for `image`, trying backing stores in order of
  preference: ping (no pixels), heap/anonymous memory, distributed cache
  server, then a disk file (memory-mapped when possible).  Resource limits
  (width/height/area/memory/map/disk) gate each tier.  Returns MagickTrue
  on success; errors are reported via `exception`.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
      (AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  /* snapshot the current cache so its pixels can be cloned into the new
     store below; the copy must not own the file descriptor */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) GetImageIndexInList(image));
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->read_mask=image->read_mask;
  cache_info->write_mask=image->write_mask;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* recompute columns from the product to detect arithmetic overflow */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: record geometry only, allocate no pixels */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,cache_info->length);
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) ||
          (cache_info->type == MemoryCache))
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* security policy requested anonymous mmap instead of heap */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            cache_info->pixels=source_info.pixels;
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* carry the previous cache's pixels into the new store */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
    }
  /*
    Create pixel cache on disk.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if ((status == MagickFalse) || (cache_info->type == DistributedCache))
    {
      DistributeCacheInfo
        *server_info;

      /* disk quota exhausted (or already distributed): try a remote
         distributed pixel-cache server before giving up */
      if (cache_info->type == DistributedCache)
        RelinquishMagickResource(DiskResource,cache_info->length);
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(DiskResource,cache_info->length);
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      /* about to repopulate: drop the old file so a fresh one is created */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      RelinquishMagickResource(DiskResource,cache_info->length);
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;  /* too large to map into address space */
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if ((status == MagickFalse) && (cache_info->type != MapCache) &&
          (cache_info->type != MemoryCache))
        {
          status=MagickTrue;
          cache_info->type=DiskCache;
        }
      else
        {
          status=MagickTrue;
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* mmap failed: fall back to plain disk I/O */
              cache_info->type=DiskCache;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,(double) cache_info->number_channels,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      RelinquishMagickResource(MapResource,cache_info->length);
    }
  /* plain disk cache: clone any previous pixels through file I/O */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache rather than initializing a new one.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Attach to, usurp, or clone a persistent (disk-resident) pixel cache.
  On return `*offset` is advanced past this image's cache, rounded up to
  the next page boundary, so successive images can share one cache file.
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  Image
    clone_image;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /* fast path: if we are the sole owner of a writable disk cache, simply
     rename its backing file into place (double-checked under the lock) */
  if ((cache_info->mode != ReadMode) &&
      ((cache_info->type == DiskCache) || (cache_info->type == MapCache)) &&
      (cache_info->reference_count == 1))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->mode != ReadMode) &&
          ((cache_info->type == DiskCache) || (cache_info->type == MapCache)) &&
          (cache_info->reference_count == 1))
        {
          /*
            Usurp existing persistent pixel cache.
          */
          if (rename_utf8(cache_info->cache_filename, filename) == 0)
            {
              (void) CopyMagickString(cache_info->cache_filename,filename,
                MagickPathExtent);
              *offset+=cache_info->length+page_size-(cache_info->length %
                page_size);
              UnlockSemaphoreInfo(cache_info->semaphore);
              cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),
                  "Usurp resident persistent cache");
              return(MagickTrue);
            }
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_image=(*image);
  clone_info=(CacheInfo *) clone_image.cache;
  image->cache=ClonePixelCache(cache_info);
  cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
  (void) CopyMagickString(cache_info->cache_filename,filename,MagickPathExtent);
  cache_info->type=DiskCache;
  cache_info->offset=(*offset);
  cache_info=(CacheInfo *) image->cache;
  status=OpenPixelCache(image,IOMode,exception);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(cache_info,clone_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Allocate a writable region of the pixel cache, as defined by the
  rectangle (x,y,columns,rows), bound to the given cache nexus.  Returns
  a pointer to the region's pixels, or NULL if the geometry is invalid or
  falls outside the cache.  Callers later push the pixels back with
  SyncAuthenticPixelsCache().
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* offset arithmetic overflowed */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* check the far corner of the region also lies inside the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates an region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Queue a writable region through the calling thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum pixel array (and/or its metacontent) has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region.  Dispatch to a registered handler when
    one is installed; otherwise use this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler ==
      (QueueAuthenticPixelsHandler) NULL)
    {
      assert(thread_id < (int) cache_info->number_threads);
      return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
        cache_info->nexus_info[thread_id],exception));
    }
  return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read `length` bytes from the cache file at `offset` into `buffer`,
  restarting short and EINTR-interrupted reads.  Returns the number of
  bytes actually read; a value less than `length` (or -1) indicates a hard
  error.  Uses pread() when available so concurrent callers do not race on
  the shared file position.  Mirrors WritePixelCacheRegion().
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error or EOF: return the short byte count */
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read the metacontent (per-pixel associated data) for the nexus region
    from the pixel-cache backing store (memory, disk, or distributed
    server).  Returns MagickTrue on success, MagickFalse on failure.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly at the cache: nothing to do */
  /*
    Guard against arithmetic overflow when computing the region offset and
    extent (mirrors the checks in ReadPixelCachePixels()).
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  if ((length/cache_info->metacontent_extent) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width rows are contiguous:  copy them in one pass. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* Metacontent is stored on disk after all pixel data. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read:  reported below */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);  /* conserve descriptors */
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* Loop exited early:  a transfer failed. */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Read the pixels of the nexus region from the pixel-cache backing store
    (memory, disk, or distributed server) into the nexus buffer.  Returns
    MagickTrue on success.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly at the cache pixels */
  /*
    Compute the starting pixel offset and per-row byte length, rejecting
    any region whose arithmetic would overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width rows are contiguous:  copy them in one pass. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read:  reported below */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);  /* conserve descriptors */
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* Loop exited early:  a transfer failed. */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  /*
    Increment the reference count of the pixel cache and return it.
  */
  CacheInfo
    *magick_restrict cache_info;

  /* cache is a Cache (void *), not a Cache *:  fix the NULL cast to match
     the parameter type (consistent with the other asserts in this file). */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  /*
    Refresh the cache's stored channel count from the image's current
    pixel-channel layout.
  */
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Reset the file-scope cache epoch counter (defined elsewhere in this
     file) to zero. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  /*
    Override the cache's pixel handlers with any non-NULL handlers supplied
    in cache_methods;  NULL entries leave the existing handler untouched.
  */
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Fix:  test the INCOMING handler for NULL.  The previous code tested
    the currently-installed handler and could therefore overwrite a valid
    handler with NULL;  this now matches the pattern used by every other
    handler above and by the authentic counterpart below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Allocate the staging buffer for a cache nexus:  anonymous-mapped memory
    when the cache_anonymous_memory policy is enabled, otherwise aligned
    heap memory (zero-filled).  Returns MagickFalse on failure and raises a
    ResourceLimitError in that case.
  */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);  /* requested length does not fit in size_t */
  if (cache_anonymous_memory > 0)
    {
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  else
    {
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) nexus_info->length));
      if (nexus_info->cache != (Quantum *) NULL)
        ResetMagickMemory(nexus_info->cache,0,(size_t) nexus_info->length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}
static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  /*
    Does nexus pixels point directly to in-core cache pixels or is it
    buffered?  Returns MagickTrue for direct (authentic) access.
  */
  MagickOffsetType
    offset;

  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  if (nexus_info->pixels == (cache_info->pixels+offset*
      cache_info->number_channels))
    return(MagickTrue);
  return(MagickFalse);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Hint the CPU to prefetch the nexus pixels:  read-only for ReadMode,
    read-write otherwise.  NOTE(review):  the two literal calls appear
    deliberate — MagickCachePrefetch presumably wraps __builtin_prefetch,
    whose rw argument must be a compile-time constant;  confirm before
    collapsing them into one call.
  */
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}
static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Bind a nexus to a cache region:  either point it directly at the
    in-core cache pixels (when the region is in bounds and contiguous) or
    attach a staging buffer that is synced to the cache later.  Returns
    the nexus pixels, or NULL on failure.
  */
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  nexus_info->region=(*region);
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      ssize_t
        x,
        y;

      /* Inclusive lower-right corner of the requested region. */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        Direct access requires the region to lie fully inside the cache
        and to be contiguous in memory:  a single row, or column-0 rows
        whose width is the cache width (or a multiple of it).
      */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
          (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
          ((nexus_info->region.width == cache_info->columns) ||
          ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  length=number_pixels*cache_info->number_channels*sizeof(Quantum);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  if (nexus_info->cache == (Quantum *) NULL)
    {
      /* First use of this nexus:  allocate a staging buffer. */
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((Quantum *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small:  reallocate. */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((Quantum *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    /* Metacontent immediately follows the pixel data in the staging buffer. */
    nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels*
      cache_info->number_channels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  /*
    Set the alpha channel of every pixel to `alpha' and enable the image's
    alpha (blend) trait.  Used when switching to a virtual-pixel method
    that requires an alpha channel.
  */
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Cannot break out of an OpenMP loop:  skip remaining rows on error. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Install a new virtual-pixel method and return the previous one.  Some
    methods require image side effects (alpha channel, colorspace) applied
    below.
  */
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* A translucent background color requires the image to have alpha. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* A non-gray background cannot be represented in a gray colorspace. */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* Transparent virtual pixels require an alpha channel. */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Synchronize the OpenCL device buffer with host memory; a no-op unless
    the cache is an in-memory cache with an attached OpenCL buffer.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=(MagickCLCacheInfo) CopyMagickCLCacheInfo(
    cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
cache_info=(CacheInfo *) image->cache;
CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Save the authentic pixels of the nexus back to the in-memory or disk
    cache and mark the image as modified (tainted).  Returns MagickTrue if
    the pixel region is synced.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  /* (fix:  removed a duplicated signature assert that appeared twice) */
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus points directly at the cache:  pixels are already in place. */
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Default sync handler:  flush the calling thread's nexus back to the
    pixel cache.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the image pixels to the in-memory or disk cache, dispatching to a
    custom sync handler when one is installed;  otherwise sync the calling
    thread's nexus directly.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Flush the image pixels to the cache; success is signalled by
    GetImagePixelCache() returning a non-NULL cache.
  */
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Write the metacontent (per-pixel associated data) of the nexus region
    to the pixel-cache backing store (memory, disk, or distributed
    server).  Returns MagickTrue on success.
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus wrote directly to the cache: nothing to do */
  /*
    Guard against arithmetic overflow when computing the region offset and
    extent (mirrors the checks in ReadPixelCachePixels()).
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  if ((length/cache_info->metacontent_extent) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width rows are contiguous:  copy them in one pass. */
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* Metacontent is stored on disk after all pixel data. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;  /* short write:  reported below */
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);  /* conserve descriptors */
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* Loop exited early:  a transfer failed. */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   W r i t e P i x e l C a c h e P i x e l s                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    If the nexus maps directly onto the authentic pixel cache there is
    nothing to copy back.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset is in pixels; length is one region row in bytes; extent is the
    whole region in bytes.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  When the region spans the full cache width
        and the byte count fits in a size_t, collapse the copy into a single
        memcpy() of the whole extent.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->columns*cache_info->number_channels;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.  The file semaphore serializes access to the
        on-disk cache file and must be released on every exit path.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      /*
        Full-width regions that fit the I/O buffer are written in one call.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported after the switch via y < rows */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      /*
        Release the file descriptor when the process is near its fd limit.
      */
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one region row per request unless
        the whole extent fits a single buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y < rows means one of the row loops above bailed out on a short write.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef enum
{
  /*
    Single-byte stand-ins for multi-character FX operators.  AcquireFxInfo()
    rewrites each compound token in the expression string (e.g. "&=", "<<",
    "&&") into one of these codes so the parser only ever sees one-character
    operators.  The values start at 0xd9 so they are outside the 7-bit ASCII
    range and cannot collide with literal characters in an expression.
  */
  BitwiseAndAssignmentOperator = 0xd9U,
  BitwiseOrAssignmentOperator,
  LeftShiftAssignmentOperator,
  RightShiftAssignmentOperator,
  PowerAssignmentOperator,
  ModuloAssignmentOperator,
  PlusAssignmentOperator,
  SubtractAssignmentOperator,
  MultiplyAssignmentOperator,
  DivideAssignmentOperator,
  IncrementAssignmentOperator,
  DecrementAssignmentOperator,
  LeftShiftOperator,
  RightShiftOperator,
  LessThanEqualOperator,
  GreaterThanEqualOperator,
  EqualOperator,
  NotEqualOperator,
  LogicalAndOperator,
  LogicalOrOperator,
  ExponentialNotation
} FxOperator;
/*
  Evaluation context for one FX expression (opaque FxInfo handle).
*/
struct _FxInfo
{
  const Image
    *images;        /* image list the expression is evaluated against */

  char
    *expression;    /* expression text, rewritten with FxOperator codes */

  FILE
    *file;          /* destination for debug output (set to stderr) */

  SplayTreeInfo
    *colors,        /* cache of color names resolved to PixelInfo */
    *symbols;       /* user variables and cached statistics (double *) */

  CacheView
    **view;         /* one virtual cache view per image in the list */

  RandomInfo
    *random_info;   /* RNG state */

  ExceptionInfo
    *exception;     /* private exception sink for internal queries */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Compound-token substitution table.  Order matters: three-character and
    compound assignment tokens must be rewritten before the shorter tokens
    they contain (e.g. "<<=" before "<<", "<<" before "<=").
  */
  static const struct
  {
    FxOperator
      code;

    const char
      *token;
  } fx_operators[] =
  {
    { BitwiseAndAssignmentOperator, "&=" },
    { BitwiseOrAssignmentOperator, "|=" },
    { LeftShiftAssignmentOperator, "<<=" },
    { RightShiftAssignmentOperator, ">>=" },
    { PowerAssignmentOperator, "^=" },
    { ModuloAssignmentOperator, "%=" },
    { PlusAssignmentOperator, "+=" },
    { SubtractAssignmentOperator, "-=" },
    { MultiplyAssignmentOperator, "*=" },
    { DivideAssignmentOperator, "/=" },
    { IncrementAssignmentOperator, "++" },
    { DecrementAssignmentOperator, "--" },
    { LeftShiftOperator, "<<" },
    { RightShiftOperator, ">>" },
    { LessThanEqualOperator, "<=" },
    { GreaterThanEqualOperator, ">=" },
    { EqualOperator, "==" },
    { NotEqualOperator, "!=" },
    { LogicalAndOperator, "&&" },
    { LogicalOrOperator, "||" },
    { ExponentialNotation, "**" }
  };

  const Image
    *image;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  unsigned char
    fx_op[2];

  /*
    Allocate the context and its splay trees, then one virtual cache view
    per image in the list.
  */
  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  for (image=GetFirstImageInList(fx_info->images); image != (Image *) NULL; )
  {
    fx_info->view[i]=AcquireVirtualCacheView(image,exception);
    i++;
    image=image->next;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  /*
    Convert compound to simple (single-byte) operators.
  */
  fx_op[1]='\0';
  for (i=0; i < (ssize_t) (sizeof(fx_operators)/sizeof(fx_operators[0])); i++)
  {
    fx_op[0]=(unsigned char) fx_operators[i].code;
    (void) SubstituteString(&fx_info->expression,fx_operators[i].token,
      (char *) fx_op);
  }
  /*
    Force right-to-left associativity for unary negation, then undo the
    rewrite where '-' followed '^', 'E', or 'e'.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ","");  /* compact string */
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% FxInfo *DestroyFxInfo(FxInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every resource acquired by AcquireFxInfo(), then the context
    itself; always returns NULL for the caller to assign back.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateChannelExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  const void
    *value;

  /*
    Look up a symbol in the symbol table; NULL when not yet defined.
  */
  value=GetValueFromSplayTree(fx_info->symbols,symbol);
  return((const double *) value);
}
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *object;

  /*
    Store a symbol's value, overwriting in place when the symbol already
    exists; otherwise allocate storage and register it in the symbol table.
  */
  object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (object == (double *) NULL)
    {
      object=(double *) AcquireQuantumMemory(1,sizeof(*object));
      if (object == (double *) NULL)
        {
          (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            fx_info->images->filename);
          return(MagickFalse);
        }
      *object=value;
      return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),
        object));
    }
  *object=value;
  return(MagickTrue);
}
/*
  Return a per-channel image statistic (depth, kurtosis, maxima, mean,
  minima, skewness, standard_deviation) scaled by QuantumScale.  Results are
  memoized in the symbol table under a key derived from the image address,
  channel, and symbol, so each statistic is computed at most once per image.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent];

  const double
    *value;

  double
    statistic;

  register const char
    *p;

  /*
    A ".<channel>" suffix on the symbol selects a specific channel; the
    previous channel mask is saved so it can be restored before returning.
  */
  channel_mask=UndefinedChannel;
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  /*
    Memoization key: image address, channel number, and symbol text.
  */
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      /*
        Cache hit: restore the channel mask before returning.
      */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}
/*
  Forward declaration: the expression evaluator is mutually recursive with
  the symbol/statistics helpers that follow.
*/
static double
  FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
    const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
/*
  Return MagickTrue when the expression begins with the named function,
  i.e. the name matches case-insensitively and the character that follows
  it is not whitespace (typically the opening parenthesis).
*/
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    c;

  register size_t
    i;

  /*
    The expression must be strictly longer than the candidate name so that
    reading expression[length] below stays in bounds.
  */
  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  /*
    Cast through unsigned char before calling isspace(): the rewritten
    expression contains FxOperator codes (bytes >= 0xd9), and passing a
    negative plain-char value to a <ctype.h> function is undefined behavior.
    This matches the casts used elsewhere in this file.
  */
  c=(int) ((unsigned char) expression[length]);
  if ((LocaleNCompare(expression,name,length) == 0) &&
      ((isspace(c) == 0) || (c == '(')))
    return(MagickTrue);
  return(MagickFalse);
}
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /*
    Greatest common divisor, iterative Euclidean algorithm.
  */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  register const char
    *p;

  register ssize_t
    level;

  /*
    Scan forward to the ')' that closes the first '(' in the expression,
    honoring nested parenthesis.  Returns a pointer to that ')' or, when the
    parenthesis are unbalanced, to the terminating NUL after raising an
    exception.
  */
  level=0;
  for (p=expression; *p != '\0'; p++)
  {
    if ((level == 1) && (*p == ')'))
      break;
    if (*p == '(')
      level++;
    else
      if (*p == ')')
        level--;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
/*
  Resolve one FX symbol at pixel (x,y): image selectors (s/u/v with optional
  [index]), pixel addressing (p{x,y} absolute, p[dx,dy] relative), color
  names, channel letters, image attributes (w, h, page.*, resolution.*, ...),
  and per-channel statistics.  Returns the symbol's value, typically scaled
  by QuantumScale for channel data.
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];

  const char
    *p;

  const double
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    A leading selector letter is only treated as such when the next
    character is not alphabetic (otherwise it is part of a longer symbol,
    e.g. "saturation").
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;

      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: s = current image, u = first, v = second; an
            optional bracketed subexpression gives an explicit index.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /*
                Copy the bracketed text (brackets nest) and evaluate it as
                the image index.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          /*
            Pixel addressing: p{x,y} sets an absolute position, p[dx,dy]
            offsets from the current position.
          */
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  /*
    Interpolate the pixel at the (possibly fractional) point.
  */
  GetPixelInfo(image,&pixel);
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /*
    Symbols of length >= 3 that are not reserved words may be color names;
    resolve and memoize them in the color cache.
  */
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];

      size_t
        length;

      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      /*
        Strip a trailing ".channel" suffix unless a ')' intervenes.
      */
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;

          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              MagickBooleanType
                status;

              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  /*
    An empty symbol (e.g. bare "p[1,1]") yields the current channel's value
    of the interpolated pixel.
  */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Dispatch on the symbol's first character.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          /*
            channel(r,g,b,...) selects a per-channel constant from the
            parsed geometry arguments.
          */
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);  /* cyan is stored in the red slot */
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];

          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;

          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;

          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);  /* magenta in the green slot */
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);  /* yellow in the blue slot */
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in: look it up as a user variable; undefined variables raise
    an exception and are defined as 0.0.
  */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}
/*
  Scan the expression and return a pointer to the operator where it should
  be split for recursive evaluation: the lowest-precedence operator at
  nesting level zero (left-to-right for most operators, right-to-left for
  unary, ternary, and assignment).  Returns NULL when no operator is found.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  /*
    c holds the previous significant character (-1 before the first one);
    several rules below depend on it.
    NOTE(review): c is assigned from a plain char at the bottom of the loop
    and may be negative for the high-bit FxOperator codes; passing such a
    value to isdigit()/isalpha() is undefined behavior per ISO C — confirm
    and consider an unsigned char cast.
  */
  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over function names and literals that would otherwise be
      misread as operators.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }  /* FALLTHROUGH into 'J'/'j' when not scientific notation */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /*
          Hex color literal: skip its hexadecimal digits.
        */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /*
      Track bracket/brace nesting via the previous character.
    */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implicit multiplication, e.g. "2u" or ")(": a digit or ')'
            followed by a letter, '(' or digit (excluding 'x'/'y').
          */
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /*
            Binary only when the previous character ends an operand;
            otherwise it is a (already rewritten) unary sign.
          */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /*
      Skip over parenthesized subexpressions entirely.
    */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
/*
  FxEvaluateSubexpression() recursively evaluates the expression string and
  returns its value as a double.  The string is first split at its lowest-
  precedence binary operator (located by FxOperatorPrecedence); the left
  operand is evaluated into alpha and the right operand by a recursive call
  at depth+1.  When no binary operator is found, the expression is handled
  as a parenthesized subexpression, a unary operator, a built-in function,
  a named symbol, or a numeric literal, in that order.  *beta receives the
  value of the right-hand operand (used by two-argument functions such as
  atan2/pow/max that take their second argument via a comma expression).
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
/* recursion limits: parentheses nesting and overall subexpression depth */
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
/* free the working copy of the expression before every return */
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
/*
  Split subexpression at the first top-level 'sentinal' character (skipping
  over one level of parentheses); on success p points at the head and q at
  the NUL written over the sentinel, so q+1 is the tail.  Throws and
  returns 0.0 if the sentinel is missing.
*/
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
/* working copy; AcquireString() sizes it with MagickPathExtent slack */
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
/* stop recursing once an error has been raised anywhere in the tree */
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
/*
  Binary-operator dispatch: p points at the lowest-precedence operator.
  The left operand (expression..p) is copied into subexpression and
  evaluated into alpha; each case then evaluates the right operand.
*/
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
/* PerceptibleReciprocal() guards against division by zero */
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
/*
  Assignment operators: the left side must be a pure alphabetic
  symbol name; its value is updated via SetFxSymbolValue().  Any
  lookup error raised while evaluating the (possibly undefined)
  left side is cleared before the assignment proceeds.
*/
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
/* shifting by >= bit-width of size_t would be undefined behavior */
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
/* empty left side means prefix form (++x): the symbol follows p */
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
/* floating-point equality within MagickEpsilon, not exact == */
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
/* short-circuits: right side is not evaluated when alpha <= 0 */
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
/* short-circuits: right side is not evaluated when alpha > 0 */
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
/* ternary: split the tail at ':' and evaluate only one branch */
(void) CopyMagickString(subexpression,++p,MagickPathExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
/* comma evaluates both but yields the LEFT value (see atan2 et al.) */
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
/* statement separator: yields the RIGHT value */
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
/* implicit multiplication between adjacent terms */
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
/*
  Parenthesized subexpression: strip the outer '(' ... ')' and recurse.
*/
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
/*
  Unary operators, built-in functions, and named symbols, dispatched on
  the first character.  IsFxFunction() matches a function-name prefix;
  longer names are tested before their prefixes (e.g. acosh before acos).
*/
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
/* second argument arrives through *beta via the comma operator */
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
/* derive a human-readable channel label from the colorspace */
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
/* "maxima"/"minima" are symbols, not the max()/min() functions */
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
/* random_info is shared across threads; serialize access */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,
beta,exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
/*
  Fallback: try a numeric literal with an optional SI prefix; if no
  characters are consumed, treat the expression as a symbol lookup.
*/
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
/*
  FxEvaluateExpression() evaluates the fx expression for the gray channel
  at pixel (0,0) and stores the result in *alpha.
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception));
}
/*
  FxPreprocessExpression() evaluates the expression once at (0,0) on the
  gray channel with the debug() trace file detached, so the dry run emits
  no debug output; the file handle is restored before returning.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
FILE
*saved_file;
MagickBooleanType
status;
saved_file=fx_info->file;
fx_info->file=(FILE *) NULL;  /* suppress debug() output during the dry run */
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
fx_info->file=saved_file;
return(status);
}
/*
  FxEvaluateChannelExpression() evaluates the fx expression for one channel
  at pixel (x,y); the value is returned in *alpha.  Returns MagickFalse
  when the evaluation raised an OptionError, MagickTrue otherwise.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
double *alpha,ExceptionInfo *exception)
{
double
beta = 0.0;  /* receives the right-hand operand; unused by callers */
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
&beta,exception);
if (exception->severity == OptionError)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DestroyFxThreadSet() releases every per-thread FxInfo in the set and then
  the array itself; always returns NULL for pointer-clearing assignment.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
ssize_t
i,
number_threads;
assert(fx_info != (FxInfo **) NULL);
number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
for (i=0; i < number_threads; i++)
{
if (fx_info[i] == (FxInfo *) NULL)
continue;
fx_info[i]=DestroyFxInfo(fx_info[i]);
}
return((FxInfo **) RelinquishMagickMemory(fx_info));
}
/*
  Allocate one FxInfo per worker thread for the given expression.  An
  expression starting with '@' names a file whose contents are the real
  expression.  Returns NULL (with exception set) on failure.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  /*
    Bug fix: FileToString() returns NULL when the file cannot be read; the
    original passed that NULL on to AcquireFxInfo() and DestroyString().
  */
  if (fx_expression == (char *) NULL)
    {
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /*
      Pre-parse once per thread so later evaluations are cheap; a parse
      failure aborts the whole set.
    */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
/*
  FxImage() clones the image and evaluates the fx expression once per
  channel of every pixel, writing the clamped result into the clone.
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag  "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A missing expression degenerates to a plain clone. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* One FxInfo per worker thread: expression evaluation is stateful. */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: rows are processed in parallel, each thread using its own
    FxInfo indexed by the OpenMP thread id.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure on any row cancels the remaining work. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* Channels flagged copy-only bypass expression evaluation. */
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* Scale the normalized result back to quantum range. */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): the atomic covers only the increment; progress is
           read below without synchronization -- confirm this is the
           intended best-effort progress reporting. */
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
|
disk.h | #pragma once
// Generator of an initial planetesimal disk: a power-law solid surface
// density between a_in and a_out with a density jump at the ice line a_ice.
// Lengths appear to be in au and masses in solar masses (see the L_CGS /
// M_CGS conversion factors) -- TODO confirm against the rest of the code.
class SolidDisk{
public:
    static PS::S32 n_init;      // particle count (0 -> derived from m_init)
    static PS::F64 m_init;      // particle mass (0 -> derived from n_init)
    static PS::F64 p;           // surface-density power-law index
    //static PS::F64 f_in;
    //static PS::F64 f_out;
    static PS::F64 f_dust;      // solid surface-density scale factor
    static PS::F64 eta_ice;     // density factor applied outside the ice line
    static PS::F64 a_in;        // inner disk edge
    static PS::F64 a_out;       // outer disk edge
    static PS::F64 a_ice;       // ice-line radius
    static PS::F64 ecc_hill;    // eccentricity dispersion in Hill units
    static PS::F64 inc_hill;    // inclination dispersion in Hill units

    // Integrate the solid surface density over [a0, a1] and return the
    // enclosed dust mass.  inIce==true selects the inside-ice-line branch
    // (no eta_ice factor).  Returns 0 for an empty interval.
    // NOTE(review): both branches divide by (2-p); p == 2 is not handled.
    static PS::F64 calcDustMass(const PS::F64 a0,
                                const PS::F64 a1,
                                const bool inIce) {
        const PS::F64 L_CGS = 14959787070000;   // 1 au in cm
        const PS::F64 M_CGS = 1.9884e33;        // solar mass in g
        if ( a1 < a0 ) return 0.;
        if ( inIce ) {
            // 10 g cm^-2 reference surface density converted to code units
            const PS::F64 coef_in = 10. * f_dust /M_CGS*L_CGS*L_CGS;
            return 2.*M_PI*coef_in/(2.-p) * ( pow(a1, 2.-p) - pow(a0, 2.-p) );
        } else {
            const PS::F64 coef_out = 10. * f_dust * eta_ice /M_CGS*L_CGS*L_CGS;
            return 2.*M_PI*coef_out/(2.-p) * ( pow(a1, 2.-p) - pow(a0, 2.-p) );
        }
    }

    // Draw a random semimajor axis in [a0, a1] distributed so the enclosed
    // mass follows the p power law (inverse-transform sampling; p == 2 uses
    // the logarithmic limit).
    static PS::F64 getSemimajorAxis(const PS::F64 a0,
                                    const PS::F64 a1) {
        assert ( a0 < a1 );
        PS::F64 R = drand48();
        if ( p != 2 ){
            return pow( (pow(a1,2.-p) - pow(a0,2.-p)) * R + pow(a0,2.-p), 1./(2.-p) );
        } else {
            return exp( (log(a1) - log(a0)) * R + log(a0) );
        }
    }

    // Build the particle system on rank 0 only; all other ranks get an
    // empty local array.  Orbital elements are drawn at random and turned
    // into positions/velocities via orbitalElement2PosVel().
    template <class Tpsys>
    static void createInitialCondition(Tpsys & pp){
        if ( PS::Comm::getRank() == 0 ){
            const PS::F64 m_sun = FPGrav::m_sun;
            PS::F64 m_in = 0.;
            PS::F64 m_out = 0.;
            PS::S32 n_in = 0;
            //PS::S32 n_out = 0;
            ////////////////////////////////////
            /* Set Particle Mass & Number */
            ////////////////////////////////////
            // Split the disk mass at the ice line when it lies inside
            // [a_in, a_out]; otherwise the whole disk is on one side.
            if ( a_out < a_ice ) {
                m_in = calcDustMass(a_in, a_out, true);
                m_out = 0.;
            } else if ( a_ice < a_in ) {
                m_in = 0.;
                m_out = calcDustMass(a_in, a_out, false);
            } else {
                m_in = calcDustMass(a_in, a_ice, true);
                m_out = calcDustMass(a_ice, a_out, false);
            }
            // Exactly one of n_init / m_init may be zero; the other is
            // derived from the total disk mass.
            assert( n_init >= 0 );
            assert( m_init >= 0. );
            if ( m_init == 0. ) {
                assert( n_init > 0 );
                m_init = (m_in + m_out) / n_init;
            }
            if ( n_init == 0 ){
                assert( m_init > 0. );
                n_init = (m_in + m_out) / m_init;  // NOTE(review): truncates toward zero
            }
            n_in = (PS::S32)round(m_in/(m_in + m_out) * n_init);
            //n_out = n_init - n_in;
            ////////////////////////////////
            /* Create Particle System */
            ////////////////////////////////
            pp.setNumberOfParticleLocal(n_init);
            for ( PS::S32 i=0; i<n_init; i++ ){
                pp[i].id = i;
                pp[i].mass = m_init;
                // set orbital element
                PS::F64 ax;
                // reduced Hill factor (m/3M)^(1/3)
                PS::F64 h = pow(pp[i].mass/(3.*m_sun), 1./3.);
                if ( a_out < a_ice || a_ice < a_in ) {
                    ax = getSemimajorAxis(a_in, a_out);
                } else {
                    // first n_in particles inside the ice line, rest outside
                    if ( i < n_in ) {
                        ax = getSemimajorAxis(a_in, a_ice);
                    } else {
                        ax = getSemimajorAxis(a_ice, a_out);
                    }
                }
                // random draws scaled by the Hill factor -- presumably
                // zero-mean with the given dispersion; see getGaussian()
                PS::F64 ecc = getGaussian(ecc_hill*h);
                PS::F64 inc = getGaussian(inc_hill*h);
                PS::F64 l = 2 * M_PI * drand48();    // mean anomaly
                PS::F64 u = solveKeplerEq(l, ecc);   // eccentric anomaly
                PS::F64 omg = 2 * M_PI * drand48();  // argument of pericenter
                PS::F64 OMG = 2 * M_PI * drand48();  // longitude of ascending node
                PS::F64 n = sqrt(m_sun / (ax*ax*ax)); // mean motion
                // Orientation vectors of the orbit in space.
                PS::F64vec P, Q;
                P.x = cos(omg)*cos(OMG) - sin(omg)*sin(OMG)*cos(inc);
                P.y = cos(omg)*sin(OMG) + sin(omg)*cos(OMG)*cos(inc);
                P.z = sin(omg)*sin(inc);
                Q.x = -sin(omg)*cos(OMG) - cos(omg)*sin(OMG)*cos(inc);
                Q.y = -sin(omg)*sin(OMG) + cos(omg)*cos(OMG)*cos(inc);
                Q.z = cos(omg)*sin(inc);
                orbitalElement2PosVel(pp[i].pos, pp[i].vel, m_sun,
                                      ax, ecc, n, u, P, Q);
            }
        } else {
            pp.setNumberOfParticleLocal(0);
        }
    }
};
// Default disk parameters; either n_init or m_init must be set nonzero
// before createInitialCondition() is called (units presumed au / solar
// masses -- TODO confirm).
PS::S32 SolidDisk::n_init = 0;        // derive count from m_init by default
PS::F64 SolidDisk::m_init = 0.;       // derive mass from n_init by default
PS::F64 SolidDisk::p = 1.5;           // surface-density power-law index
PS::F64 SolidDisk::f_dust = 0.71;     // solid surface-density scale factor
PS::F64 SolidDisk::eta_ice = 30./7.1; // density factor outside the ice line
PS::F64 SolidDisk::a_in = 0.98;       // inner disk edge
PS::F64 SolidDisk::a_out = 1.02;      // outer disk edge
PS::F64 SolidDisk::a_ice = 2.0;       // ice-line radius
PS::F64 SolidDisk::ecc_hill = 2.0;    // eccentricity dispersion (Hill units)
PS::F64 SolidDisk::inc_hill = 1.0;    // inclination dispersion (Hill units)
// Aerodynamic gas-drag model for a power-law gas disk.  The constructor
// precomputes unit conversions; calcGasDrag() adds the drag acceleration
// to each particle.
class GasDisk{
public:
    static PS::F64 alpha_gas;   // radial power-law index of the gas density
    static PS::F64 beta_gas;    // second index entering eta -- presumably a
                                // temperature/scale-height slope; confirm
    static PS::F64 f_gas;       // gas density scale factor
    static PS::F64 tau_gas;     // gas decay timescale (0 disables decay)
    static PS::F64 C_d;         // drag coefficient
    static PS::F64 mu;          // mean molecular weight

    PS::F64 coef_rho_gas;       // gas density at r = 1 in code units
    PS::F64 coef_cs_vk;         // sound speed / Kepler velocity at r = 1
    PS::F64 coef_acc_gd;        // 0.5 * C_d * pi drag prefactor

    // Convert the CGS reference quantities into code units and report them
    // once from rank 0.
    GasDisk(){
        const PS::F64 L_CGS = 14959787070000;           // 1 au in cm
        const PS::F64 M_CGS = 1.9884e33;                // solar mass in g
        const PS::F64 T = 365.25*24.*60.*60./(2.*M_PI); // time unit in s (yr/2pi)
        // 1.4e-9 g cm^-3 reference density at 1 au
        coef_rho_gas = 1.4e-9 * f_gas /M_CGS*L_CGS*L_CGS*L_CGS;
        const PS::F64 k_B = 1.380649e-16 /(M_CGS*L_CGS*L_CGS)*T*T;  // Boltzmann constant
        const PS::F64 N_A = 6.022140857e23;             // Avogadro constant
        const PS::F64 m_H = 1./N_A /M_CGS;              // hydrogen-atom mass
        PS::F64 coef_cs = sqrt(k_B * 280 / (mu * m_H)); // sound speed at 280 K
        PS::F64 coef_vk = sqrt(FPGrav::m_sun);          // Kepler velocity at r = 1
        coef_cs_vk = coef_cs / coef_vk;
        coef_acc_gd = 0.5*C_d*M_PI;
        if ( PS::Comm::getRank() == 0 ) {
            std::cout << "rho_gas at 1 AU = " << coef_rho_gas << std::endl
                      << "cs/vk at 1 AU = " << coef_cs_vk << std::endl;
        }
    }

    // Accumulate the quadratic gas-drag acceleration into pp[i].acc_gd and
    // pp[i].acc.  L scales the stellar luminosity; clear resets acc_gd
    // first.  Massless particles are skipped (avoids division by zero).
    template <class Tpsys>
    void calcGasDrag(Tpsys & pp,
                     PS::F64 time,
                     PS::F64 L=1.,
                     bool clear=true){
        const PS::S32 n_loc = pp.getNumberOfParticleLocal();
#pragma omp parallel for
        for(PS::S32 i=0; i<n_loc; i++){
            // Cylindrical radius from the x,y components.
            PS::F64 r2 = pp[i].pos.x*pp[i].pos.x + pp[i].pos.y*pp[i].pos.y;
            PS::F64 r_inv = 1./sqrt(r2);
            PS::F64 r = r2 * r_inv;
            // Local gas density; optional exponential decay in time.
            PS::F64 rho_gas = coef_rho_gas * pow(r, -alpha_gas);
            if ( tau_gas != 0. ) rho_gas *= exp(-time / tau_gas);
            // cs/vk grows as r^{1/4} and scales with L^{1/8}.
            PS::F64 cs_vk = coef_cs_vk * sqrt(sqrt(r)) * pow(L, 1./8.);
            // Azimuthal unit vector and circular Kepler velocity.
            PS::F64vec ev(-pp[i].pos.y*r_inv, pp[i].pos.x*r_inv, 0.0);
            PS::F64vec vkep = sqrt(FPGrav::m_sun * r_inv) * ev;
            // Gas rotates sub-Keplerian by the factor (1 - eta).
            PS::F64 eta = 0.5 * (alpha_gas + beta_gas) * cs_vk * cs_vk;
            PS::F64vec vgas = (1.0 - eta)*vkep;
            PS::F64vec u = pp[i].vel - vgas;  // velocity relative to the gas
            //PRL(eta);
            //PS::F64 rplanet = cbrt(0.75*pp[i].mass/(M_PI*FPGrav::dens));
            if (clear) pp[i].acc_gd = 0.;
            if ( pp[i].mass != 0. ) {
                // a_drag = -(C_d/2) pi r_p^2 rho |u| u / m
                //pp[i].acc_gd += -coef_acc_gd * rplanet * rplanet * rho_gas * sqrt(u*u) * u / pp[i].mass;
                pp[i].acc_gd += -coef_acc_gd * pp[i].r_planet * pp[i].r_planet * rho_gas * sqrt(u*u) * u / pp[i].mass;
                pp[i].acc += pp[i].acc_gd;
            }
        }
    }
};
// Default gas-disk parameters.
PS::F64 GasDisk::alpha_gas = 11./4.;     // density power-law index
PS::F64 GasDisk::beta_gas = 0.5;         // second index entering eta
PS::F64 GasDisk::f_gas = 0.71;           // gas density scale factor
PS::F64 GasDisk::tau_gas = 1.e6*2.*M_PI; // decay timescale (1 Myr in code time -- presumably)
PS::F64 GasDisk::C_d = 1.;               // drag coefficient
PS::F64 GasDisk::mu = 2.34;              // mean molecular weight
|
ztrmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
* - op(A) = A or
* - op(A) = A^T or
* - op(A) = A^H
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_ztrmm
* @sa plasma_ctrmm
* @sa plasma_dtrmm
* @sa plasma_strmm
*
******************************************************************************/
int plasma_ztrmm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    // A negative return code identifies the offending argument position.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        return -1;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans )
    {
        plasma_error("illegal value of transa");
        return -3;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }

    // A is k-by-k, where k depends on which side it multiplies from.
    int k = (side == PlasmaLeft) ? m : n;
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trmm(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    // Only the referenced triangle of A is stored in tile layout.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaComplexDouble, uplo, nb, nb,
                                           k, k, 0, 0, k, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    // Layout translation, compute, and translation back are pipelined by
    // the OpenMP task dependences inside the called routines.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_ztr2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async interface.
        plasma_omp_ztrmm(side, uplo, transa, diag,
                         alpha, A,
                                B,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    return sequence.status;
}
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs triangular matrix multiplication. Non-blocking tile version of
* plasma_ztrmm(). May return before the computation is finished. Operates on
* matrices stored by tiles. All matrices are passed through descriptors. All
* dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of the triangular matrix A.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_ztrmm
* @sa plasma_omp_ctrmm
* @sa plasma_omp_dtrmm
* @sa plasma_omp_strmm
*
******************************************************************************/
void plasma_omp_ztrmm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      plasma_complex64_t alpha, plasma_desc_t A,
                                                plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }

    // Check input arguments.
    // Errors are reported through the sequence/request, not a return code.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.m == 0 || B.n == 0)
        return;

    // alpha == 0 makes the product identically zero: just clear B.
    if (alpha == 0.0) {
        plasma_complex64_t zzero = 0.0;
        plasma_pzlaset(PlasmaGeneral, zzero, zzero, B, sequence, request);
        return;
    }

    // Call parallel function.
    plasma_pztrmm(side, uplo, transa, diag, alpha,
                  A, B,
                  sequence, request);
}
|
Stencil_par4.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "malloc2D.h"
#include "timer.h"
/*
 * 5-point Jacobi stencil on a jmax x imax grid, offloaded with OpenMP
 * target directives.  Runs niter iterations in bursts of nburst and
 * reports accumulated kernel time.
 */
int main(int argc, char *argv[])
{
   /* Bug fix: cpu_time was uninitialized while accumulated with "+="
      (undefined behavior / garbage timing).  The unused tstop_cpu local
      has been removed. */
   struct timespec tstart_cpu;
   double cpu_time = 0.0;

   int imax=2002, jmax = 2002;
   int niter=1000, nburst=100;

   double** restrict x = malloc2D(jmax, imax);
   double** restrict xnew = malloc2D(jmax, imax);

   #pragma omp target enter data map(to:x[0:jmax][0:imax], xnew[0:jmax][0:imax])

   /* Initialize the field on the device: uniform 5.0 with a hot patch in
      the middle of the domain. */
   #pragma omp target teams
   {
      #pragma omp distribute
      for (int j = 0; j < jmax; j++){
         #pragma omp parallel for simd
         for (int i = 0; i < imax; i++){
            xnew[j][i] = 0.0;
            x[j][i] = 5.0;
         }
      }
      #pragma omp distribute
      for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){
         #pragma omp parallel for simd
         for (int i = imax/2 - 5; i < imax/2 -1; i++){
            x[j][i] = 400.0;
         }
      }
   }

   for (int iter = 0; iter < niter; iter+=nburst){
      for (int ib = 0; ib < nburst; ib++){
         cpu_timer_start(&tstart_cpu);
         /* Interior update: 5-point average into xnew. */
         #pragma omp target teams distribute
         for (int j = 1; j < jmax-1; j++){
            #pragma omp parallel for simd
            for (int i = 1; i < imax-1; i++){
               xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0;
            }
         }
         /* Copy xnew back into x for the next iteration. */
         #pragma omp target teams distribute
         for (int j = 0; j < jmax; j++){
            #pragma omp parallel for simd
            for (int i = 0; i < imax; i++){
               x[j][i] = xnew[j][i];
            }
         }
         cpu_time += cpu_timer_stop(tstart_cpu);
      }
      printf("Iter %d\n",iter+nburst);
   }
   #pragma omp target exit data map(from:x[0:jmax][0:imax], xnew[0:jmax][0:imax])

   free(x);
   free(xnew);

   printf("Timing is %lf\n",cpu_time);
   return 0;
}
|
GB_unop__sqrt_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sqrt_fp64_fp64)
// op(A') function: GB (_unop_tran__sqrt_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = sqrt (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sqrt (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = sqrt (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Cx = sqrt (Ax): apply the unary operator over all entries
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sqrt_fp64_fp64)
(
    double *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = sqrt (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = sqrt (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = sqrt (A'): transpose and apply the unary operator in one pass.
GrB_Info GB (_unop_tran__sqrt_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The generic transpose template expands here; it uses the GB_* macros
    // defined above (GB_CAST_OP applies sqrt while transposing).
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
heat-2dp.dtile.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Discretized 2D heat equation stencil with non periodic boundary conditions
* Adapted from Pochoir test bench
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
/*
* N is the number of points
* T is the number of timesteps
*/
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 8000L
#define T 100000L
#endif
#define NUM_FP_OPS 10
/* Define our arrays */
double A[2][N][N];
double total=0; double sum_err_sqr=0;
int chtotal=0;
/*
 * Compute *result = *x - *y with the microsecond field normalized to
 * [0, 1000000).  Returns 1 if the difference is negative (x earlier than
 * y), 0 otherwise.  Note: *y is adjusted in place during normalization.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
    /* Borrow seconds when x has fewer microseconds than y. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry whole seconds out of the microsecond difference. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* The microsecond field is now non-negative and below one second. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the time-tiled periodic 2D heat stencil.  Initializes the
 * grid with pseudo-random values, runs the auto-generated tiled kernel,
 * then optionally times and verifies the result.
 */
int main(int argc, char * argv[]) {
  long int t, i, j, k;
  const int BASE = 1024;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0;

  printf("Number of points = %ld\t|Number of timesteps = %ld\t", N*N, T);

  /* Initialization */
  srand(42); // seed with a constant value to verify results
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      A[0][i][j] = 1.0 * (rand() % BASE);
    }
  }

#ifdef TIME
  gettimeofday(&start, 0);
#endif

  // The kernel below treats the grid as an N=4000 quadrant and reaches the
  // other three quadrants of the full 8000-point periodic domain through
  // the mirrored 2*N-1-... indices.  NOTE(review): N is restored to 8000
  // only inside #ifdef TIME -- confirm VERIFY without TIME is intended to
  // run with N=4000.
#undef N
#define N 4000L

  int t1, t2, t3, t4, t5, t6;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;

  /* Start of CLooG code */
  // Auto-generated (Pluto/CLooG) time-skewed tiled loop nest: t1 tiles
  // time, t2/t3 tile space, t4 is physical time, t5/t6 physical space;
  // A[t4%2] / A[(t4+1)%2] implement the double buffer.  Do not hand-edit.
  if ((N >= 1) && (T >= 1)) {
    for (t1=-1;t1<=floord(T-1,16);t1++) {
      lbp=ceild(t1,2);
      ubp=min(floord(T+N-2,32),floord(16*t1+N+14,32));
#pragma omp parallel for private(lbv,ubv,t2,t3,t4,t5,t6)
      for (t2=lbp;t2<=ubp;t2++) {
        for (t3=max(0,ceild(t1-7,8));t3<=min(floord(T+N-2,128),floord(16*t1+N+30,128));t3++) {
          if(t2==lbp || t3<= max(0,ceild(t1-3,4))+1){
            // Boundary tiles: the periodic wrap may be live, so every
            // update carries the (t4==t5)/(t4==t6) wrap-around selects.
            for (t4=max(max(max(0,16*t1),32*t2-N+1),128*t3-N+1);t4<=min(min(min(T-1,16*t1+31),128*t3+127),32*t1-32*t2+N+30);t4++) {
              for (t5=max(max(32*t2,t4),-32*t1+32*t2+2*t4-31);t5<=min(min(32*t2+31,-32*t1+32*t2+2*t4),t4+N-1);t5++) {
                lbv=max(128*t3,t4);
                ubv=min(128*t3+127,t4+N-1);
#pragma ivdep
#pragma vector always
                for (t6=lbv;t6<=ubv;t6++) {
                  // Update the point and its three mirror images.
                  A[(t4+1)%2][-t4+t5][-t4+t6] =0.125*(A[t4%2][-t4+t5+1][-t4+t6]-2.0*A[t4%2][-t4+t5][-t4+t6]+A[t4%2][(t4==t5)?(2*N-1):(-t4+t5-1)][-t4+t6]) +0.125*(A[t4%2][-t4+t5][-t4+t6+1]-2.0*A[t4%2][-t4+t5][-t4+t6]+A[t4%2][-t4+t5][(t4==t6)?(2*N-1):(-t4+t6-1)])+A[t4%2][-t4+t5][-t4+t6];;
                  A[(t4+1)%2][2*N-1-(-t4+t5)][-t4+t6] =0.125*(A[t4%2][(t4==t5)?0:(2*N-1-(-t4+t5)+1)][-t4+t6]-2.0*A[t4%2][2*N-1-(-t4+t5)][-t4+t6]+A[t4%2][2*N-1-(-t4+t5)-1][-t4+t6]) +0.125*(A[t4%2][2*N-1-(-t4+t5)][-t4+t6+1]-2.0*A[t4%2][2*N-1-(-t4+t5)][-t4+t6]+A[t4%2][2*N-1-(-t4+t5)][(t4==t6)?(2*N-1):(-t4+t6-1)])+A[t4%2][2*N-1-(-t4+t5)][-t4+t6];;
                  A[(t4+1)%2][-t4+t5][2*N-1-(-t4+t6)] =0.125*(A[t4%2][-t4+t5+1][2*N-1-(-t4+t6)]-2.0*A[t4%2][-t4+t5][2*N-1-(-t4+t6)]+A[t4%2][(t4==t5)?(2*N-1):(-t4+t5-1)][2*N-1-(-t4+t6)]) +0.125*(A[t4%2][-t4+t5][(t4==t6)?0:(2*N-1-(-t4+t6)+1)]-2.0*A[t4%2][-t4+t5][2*N-1-(-t4+t6)]+A[t4%2][-t4+t5][2*N-1-(-t4+t6)-1])+A[t4%2][-t4+t5][2*N-1-(-t4+t6)];;
                  A[(t4+1)%2][2*N-1-(-t4+t5)][2*N-1-(-t4+t6)]=0.125*(A[t4%2][(t4==t5)?0:(2*N-1-(-t4+t5)+1)][2*N-1-(-t4+t6)]-2.0*A[t4%2][2*N-1-(-t4+t5)][2*N-1-(-t4+t6)]+A[t4%2][2*N-1-(-t4+t5)-1][2*N-1-(-t4+t6)]) +0.125*(A[t4%2][2*N-1-(-t4+t5)][(t4==t6)?0:(2*N-1-(-t4+t6)+1)]-2.0*A[t4%2][2*N-1-(-t4+t5)][2*N-1-(-t4+t6)]+A[t4%2][2*N-1-(-t4+t5)][2*N-1-(-t4+t6)-1])+A[t4%2][2*N-1-(-t4+t5)][2*N-1-(-t4+t6)];;
                }
              }
            }
          }
          else {
            // Interior tiles: no wrap-around selects; the time parity is
            // tested once so the buffer indices become the constants 0/1.
            for (t4=max(max(max(0,16*t1),32*t2-N+1),128*t3-N+1);t4<=min(min(min(T-1,16*t1+31),128*t3+127),32*t1-32*t2+N+30);t4++) {
              if(t4%2==0){
                // even step: read A[0], write A[1]
                for (t5=max(max(32*t2,t4),-32*t1+32*t2+2*t4-31);t5<=min(min(32*t2+31,-32*t1+32*t2+2*t4),t4+N-1);t5++) {
                  lbv=max(128*t3,t4);
                  ubv=min(128*t3+127,t4+N-1);
#pragma ivdep
#pragma vector always
                  for (t6=lbv;t6<=ubv;t6++) {
                    A[1][-t4+t5][-t4+t6]=0.125*(A[0][-t4+t5+1][-t4+t6]-2.0*A[0][-t4+t5][-t4+t6]+A[0][-t4+t5-1][-t4+t6])+0.125*(A[0][-t4+t5][-t4+t6+1]-2.0*A[0][-t4+t5][-t4+t6]+A[0][-t4+t5][-t4+t6-1])+A[0][-t4+t5][-t4+t6];;
                  }
#pragma ivdep
#pragma vector always
                  for (t6=lbv;t6<=ubv;t6++) {
                    A[1][2*N-1-(-t4+t5)][-t4+t6]=0.125*(A[0][2*N-1-(-t4+t5)+1][-t4+t6]-2.0*A[0][2*N-1-(-t4+t5)][-t4+t6]+A[0][2*N-1-(-t4+t5)-1][-t4+t6])+0.125*(A[0][2*N-1-(-t4+t5)][-t4+t6+1]-2.0*A[0][2*N-1-(-t4+t5)][-t4+t6]+A[0][2*N-1-(-t4+t5)][-t4+t6-1])+A[0][2*N-1-(-t4+t5)][-t4+t6];;
                  }
                  // Mirrored column range (bounds intentionally reversed).
                  lbv=2*N-1-max(128*t3,t4);
                  ubv=2*N-1-min(128*t3+127,t4+N-1);
#pragma ivdep
#pragma vector always
                  for (t6=ubv;t6<=lbv;t6++) {
                    A[1][-t4+t5][t4+t6]=0.125*(A[0][-t4+t5+1][t4+t6]-2.0*A[0][-t4+t5][t4+t6]+A[0][-t4+t5-1][t4+t6])+0.125*(A[0][-t4+t5][t4+t6+1]-2.0*A[0][-t4+t5][t4+t6]+A[0][-t4+t5][t4+t6-1])+A[0][-t4+t5][t4+t6];;
                  }
#pragma ivdep
#pragma vector always
                  for (t6=ubv;t6<=lbv;t6++) {
                    A[1][2*N-1-(-t4+t5)][t4+t6]=0.125*(A[0][2*N-1-(-t4+t5)+1][t4+t6]-2.0*A[0][2*N-1-(-t4+t5)][t4+t6]+A[0][2*N-1-(-t4+t5)-1][t4+t6])+0.125*(A[0][2*N-1-(-t4+t5)][t4+t6+1]-2.0*A[0][2*N-1-(-t4+t5)][t4+t6]+A[0][2*N-1-(-t4+t5)][t4+t6-1])+A[0][2*N-1-(-t4+t5)][t4+t6];;
                  }
                }
              }else{
                // odd step: read A[1], write A[0]
                for (t5=max(max(32*t2,t4),-32*t1+32*t2+2*t4-31);t5<=min(min(32*t2+31,-32*t1+32*t2+2*t4),t4+N-1);t5++) {
                  lbv=max(128*t3,t4);
                  ubv=min(128*t3+127,t4+N-1);
#pragma ivdep
#pragma vector always
                  for (t6=lbv;t6<=ubv;t6++) {
                    A[0][-t4+t5][-t4+t6]=0.125*(A[1][-t4+t5+1][-t4+t6]-2.0*A[1][-t4+t5][-t4+t6]+A[1][-t4+t5-1][-t4+t6])+0.125*(A[1][-t4+t5][-t4+t6+1]-2.0*A[1][-t4+t5][-t4+t6]+A[1][-t4+t5][-t4+t6-1])+A[1][-t4+t5][-t4+t6];;
                  }
#pragma ivdep
#pragma vector always
                  for (t6=lbv;t6<=ubv;t6++) {
                    A[0][2*N-1-(-t4+t5)][-t4+t6]=0.125*(A[1][2*N-1-(-t4+t5)+1][-t4+t6]-2.0*A[1][2*N-1-(-t4+t5)][-t4+t6]+A[1][2*N-1-(-t4+t5)-1][-t4+t6])+0.125*(A[1][2*N-1-(-t4+t5)][-t4+t6+1]-2.0*A[1][2*N-1-(-t4+t5)][-t4+t6]+A[1][2*N-1-(-t4+t5)][-t4+t6-1])+A[1][2*N-1-(-t4+t5)][-t4+t6];;
                  }
                  // Mirrored column range (bounds intentionally reversed).
                  lbv=2*N-1-max(128*t3,t4);
                  ubv=2*N-1-min(128*t3+127,t4+N-1);
#pragma ivdep
#pragma vector always
                  for (t6=ubv;t6<=lbv;t6++) {
                    A[0][-t4+t5][t4+t6]=0.125*(A[1][-t4+t5+1][t4+t6]-2.0*A[1][-t4+t5][t4+t6]+A[1][-t4+t5-1][t4+t6])+0.125*(A[1][-t4+t5][t4+t6+1]-2.0*A[1][-t4+t5][t4+t6]+A[1][-t4+t5][t4+t6-1])+A[1][-t4+t5][t4+t6];;
                  }
#pragma ivdep
#pragma vector always
                  for (t6=ubv;t6<=lbv;t6++) {
                    A[0][2*N-1-(-t4+t5)][t4+t6]=0.125*(A[1][2*N-1-(-t4+t5)+1][t4+t6]-2.0*A[1][2*N-1-(-t4+t5)][t4+t6]+A[1][2*N-1-(-t4+t5)-1][t4+t6])+0.125*(A[1][2*N-1-(-t4+t5)][t4+t6+1]-2.0*A[1][2*N-1-(-t4+t5)][t4+t6]+A[1][2*N-1-(-t4+t5)][t4+t6-1])+A[1][2*N-1-(-t4+t5)][t4+t6];;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End of CLooG code */

#ifdef TIME
  gettimeofday(&end, 0);

  // Restore the full-domain N for the statistics below.
#undef N
#define N 8000L

  ts_return = timeval_subtract(&result, &end, &start);
  tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);

  printf("|Time taken = %7.5lfms\t", tdiff * 1.0e3);
  printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N *N * T) / tdiff) / 1000000L));
#endif

#ifdef VERIFY
  // Checksums over the final buffer (parity of T selects the live buffer).
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      total+= A[T%2][i][j] ;
    }
  }
  printf("|sum: %e\t", total);
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      sum_err_sqr += (A[T%2][i][j] - (total/N))*(A[T%2][i][j] - (total/N));
    }
  }
  printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      chtotal += ((char *)A[T%2][i])[j];
    }
  }
  printf("|sum(rep(A)) = %d\n", chtotal);
#endif
  return 0;
}
// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/
// /* @ end @*/
|
time.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define Length 1.0
#define Temperature_1 1.0
#define Temperature_2 5.0
int main(int argc, char **argv) {
    /* Time at which the temperature distribution is requested. */
    double Time = 1.0;
    /* Number of coordinate subdivisions of the rod. */
    size_t M = 10;
    /* Number of parallel threads. */
    size_t size = 1;
    /* Number of timing experiments to average over.
     * FIX: 'numexp' was referenced below but never defined anywhere. */
    const size_t numexp = 1;

    if (argc > 1) {
        /* Read the time at which we want the temperature distribution. */
        Time = atof(argv[1]);
        if (Time < 0) {
            printf("Sorry, timemachine hasn't been invented yet!");
            return EXIT_FAILURE;
        }
        if (argc > 2) {
            /* Number of coordinate subdivisions. */
            M = atoll(argv[2]);
            if (M < 2) {
                /* The method does not converge otherwise. */
                printf("Invalid values!\n");
                return EXIT_FAILURE;
            }
            if (argc > 3) {
                size = atoll(argv[3]);
                if (M <= size) {
                    /* Partition too fine: not every thread would get work. */
                    printf("Required number of processes is unreasonable "
                           "compared to coordinate partition!\n");
                    return EXIT_FAILURE;
                }
            }
        }
    }

    /* Coordinate step. */
    double h = Length / M;
    /* Time step (Courant number 0.3). */
    double tau = 0.3 * h * h;
    /* Number of time steps. */
    int N = Time / tau;

    /* Temperature arrays for time levels n and n + 1 respectively. */
    double *u0 = (double *) malloc(sizeof(double) * M);
    double *u1 = (double *) malloc(sizeof(double) * M);
    /* FIX: allocate room for size + 1 elements; the original
     * 'sizeof(size_t) * size + 1' added one BYTE, so the write to
     * left_index[size] below overflowed the heap. */
    size_t *left_index = (size_t *) malloc(sizeof(size_t) * (size + 1));
    /* Array of locks: lock[id] guards thread id's left border,
     * lock[id + size] its right border. */
    omp_lock_t *lock = (omp_lock_t *) malloc(sizeof(omp_lock_t) * 2 * size);
    if (!u0 || !u1 || !left_index || !lock) {
        printf("Out of memory!\n");
        free(u0);
        free(u1);
        free(left_index);
        free(lock);
        return EXIT_FAILURE;
    }

    /* Loop counters over time and coordinate. */
    size_t m, n;
    /* Initial condition (f(x) = 0). */
    for (m = 0; m < M; m++) {
        u0[m] = u1[m] = 0.0;
    }
    /* Boundary conditions. */
    u0[0] = u1[0] = Temperature_1;
    u0[M - 1] = u1[M - 1] = Temperature_2;

    /* left_index[i] is the first interior point owned by thread i; the right
     * end of thread i's range is left_index[i + 1] (exclusive).  Defining
     * left_index[size] avoids special-casing the last thread. */
    left_index[0] = 1;
    left_index[size] = M - 1;
    /* FIX: distribute the M - 2 interior points evenly.  The original formula
     * '(i - 1) < ((M % size) - 2)' underflowed (unsigned arithmetic) whenever
     * M % size < 2, giving every thread an extra point and a broken partition. */
    {
        size_t base = (M - 2) / size;
        size_t extra = (M - 2) % size;
        for (size_t i = 1; i < size; i++) {
            left_index[i] = left_index[i - 1] + base + ((i - 1) < extra ? 1 : 0);
        }
    }

    /* Initialize the lock array. */
    for (size_t i = 0; i < 2 * size; ++i) {
        omp_init_lock(&lock[i]);
    }

    /* Number of threads that have finished the current time step. */
    size_t epoc = 0;
    double time = 0.0;
    /* Set the number of threads for the parallel region below. */
    omp_set_num_threads(size);

    for (size_t j = 0; j < numexp; j++) {
        /* Start the stopwatch. */
        double start = omp_get_wtime();
#pragma omp parallel private(n, m)
        {
            size_t id = omp_get_thread_num();
            /* Time loop. */
            for (n = 0; n < (size_t) N; n++) {
                /* Explicit four-point scheme over this thread's points. */
                for (m = left_index[id]; m < left_index[id + 1]; ++m) {
                    int handled = 0;
                    if ((m == left_index[id]) && (id != 0)) {
                        /* Read the left neighbour's border node under its lock. */
                        omp_set_lock(&lock[id - 1 + size]);
                        double left = u0[m - 1];
                        omp_unset_lock(&lock[id - 1 + size]);
                        /* Update our own border node under our lock. */
                        omp_set_lock(&lock[id]);
                        u1[m] = u0[m] + 0.3 * (left - 2.0 * u0[m] + u0[m + 1]);
                        omp_unset_lock(&lock[id]);
                        handled = 1;
                    }
                    if ((m == left_index[id + 1] - 1) && (id != size - 1)) {
                        /* Read the right neighbour's border node under its lock. */
                        omp_set_lock(&lock[id + 1]);
                        double right = u0[m + 1];
                        omp_unset_lock(&lock[id + 1]);
                        omp_set_lock(&lock[id + size]);
                        u1[m] = u0[m] + 0.3 * (u0[m - 1] - 2.0 * u0[m] + right);
                        omp_unset_lock(&lock[id + size]);
                        handled = 1;
                    }
                    /* FIX: only interior nodes take the plain update; the
                     * original unconditionally overwrote the lock-protected
                     * border results with an unprotected recomputation,
                     * making the locking above pointless. */
                    if (!handled) {
                        u1[m] = u0[m] + 0.3 * (u0[m - 1] - 2.0 * u0[m] + u0[m + 1]);
                    }
                }
                /* Atomically announce that this thread finished the step. */
#pragma omp atomic
                epoc++;
#pragma omp single
                {
                    /* Spin until every thread finished.  FIX: read 'epoc'
                     * atomically (the original read it racily), and "pause"
                     * is an x86-specific spin-wait hint. */
                    size_t done;
                    do {
#pragma omp atomic read
                        done = epoc;
                        if (done < size)
                            __asm volatile ("pause" ::: "memory");
                    } while (done < size);
                    /* FIX: reset the epoch here, while all other threads sit
                     * at the implicit barrier of this 'single'.  The original
                     * reset it at the top of the loop in every thread, so a
                     * late reset could erase earlier increments and deadlock
                     * the spin above. */
                    epoc = 0;
                    /* Swap time levels. */
                    double *t = u0;
                    u0 = u1;
                    u1 = t;
                } /* implicit barrier here */
            }
        }
        /* Accumulate wall-clock time of this experiment. */
        time += omp_get_wtime() - start;
    }

    /* Destroy the locks. */
    for (size_t i = 0; i < 2 * size; ++i) {
        omp_destroy_lock(&lock[i]);
    }
    /* FIX: %zu for size_t (was %d, undefined behavior). */
    printf("\n %zu %lf\n", size, time / numexp);
    /* Release memory (lock and left_index were leaked before). */
    free(lock);
    free(left_index);
    free(u0);
    free(u1);
    return EXIT_SUCCESS;
}
|
machinedeps.c | /*
Copyright © INRIA 2009-2014.
Authors: Matthijs Douze & Herve Jegou
Contact: matthijs.douze@inria.fr herve.jegou@inria.fr
This file is part of Yael.
Yael is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Yael is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Yael. If not, see <http://www.gnu.org/licenses/>.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <assert.h>
#include <sys/time.h>
#include "machinedeps.h"
/* Allow the user to override CPU detection via the YAEL_COUNT_CPU
 * environment variable.  Returns the parsed positive value, or 0 when the
 * variable is unset or unparsable (caller falls back to autodetection). */
static int count_cpu_from_env() {
  int ncpu;
  if(!getenv("YAEL_COUNT_CPU")) return 0;
  if(sscanf(getenv("YAEL_COUNT_CPU"), "%d", &ncpu) != 1 || ncpu <= 0) {
    /* FIX: the message named the wrong variable (YAEL_CPU_COUNT). */
    fprintf(stderr, "could not parse YAEL_COUNT_CPU environment variable, using default\n");
    return 0;
  }
  return ncpu;
}
#ifdef __linux__
#define __USE_GNU
#include <sched.h>
/* Linux: count the CPUs in this process's affinity mask, unless overridden
 * by the YAEL_COUNT_CPU environment variable. */
int count_cpu (void)
{
  int ncpu = count_cpu_from_env();
  if(ncpu) return ncpu;
  cpu_set_t set;
  CPU_ZERO (&set);
  /* FIX: the original ignored the return value, so on failure 'set' was
   * read uninitialized (undefined behavior).  Fall back to 1 CPU. */
  if (sched_getaffinity (0, sizeof (cpu_set_t), &set) != 0)
    return 1;
  int i, count = 0;
  for (i = 0; i < CPU_SETSIZE; i++)
    if (CPU_ISSET (i, &set))
      count++;
  return count;
}
#elif defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
/* macOS: ask the kernel for the number of CPUs via sysctl, unless
 * overridden by the YAEL_COUNT_CPU environment variable. */
int count_cpu (void) {
  int ncpu = count_cpu_from_env();
  if(ncpu) return ncpu;
  int count=-1;
  size_t count_size=sizeof(count);
  /* NOTE(review): the sysctlbyname return value is ignored; on failure this
   * function returns -1 -- confirm callers tolerate a negative count. */
  sysctlbyname("hw.ncpu",&count,&count_size,NULL,0);
  return count;
}
#else
/* Unknown platform: conservatively assume a single CPU. */
int count_cpu() {
  return 1;
}
#endif
#ifndef __APPLE__
/* Base-2 logarithm via natural log: log(x) / ln(2).
 * NOTE(review): C99 <math.h> already declares log2; providing this definition
 * (guarded by #ifndef __APPLE__) can clash with libm on modern toolchains --
 * confirm it is still needed for the targeted platforms. */
double log2(double x) {
  return log(x)/M_LN2;
}
#endif
#ifndef __linux__
/* Fallback for platforms without memalign: plain malloc.  The requested
 * alignment is deliberately ignored -- callers only get malloc's natural
 * alignment on these platforms. */
void *memalign (size_t ignored, size_t nbytes)
{
  return malloc (nbytes);
}
#endif
/* Wall-clock timestamp in milliseconds: seconds * 1e3 + microseconds * 1e-3. */
double getmillisecs()
{
  struct timeval now;
  gettimeofday(&now, NULL);
  double ms = (double) now.tv_sec * 1e3;
  ms += (double) now.tv_usec * 1e-3;
  return ms;
}
/***********************************************************************
* Implementation of the threading part
*
* generic thread stuff */
#ifdef _OPENMP
#include <omp.h>
#define GET_THREAD_NUM omp_get_thread_num()
#else
#define GET_THREAD_NUM 0
/* #pragma's will be ignored */
#endif
/* Run task_fun(task_arg, tid, item) for every item in [0, n), dynamically
 * scheduled over up to nt threads.  Without OpenMP the pragma is ignored and
 * the tasks run sequentially with tid == 0. */
void compute_tasks (int n, int nt,
                    void (*task_fun) (void *arg, int tid, int i),
                    void *task_arg)
{
  int item;
#pragma omp parallel for schedule(dynamic) num_threads(nt)
  for (item = 0; item < n; item++)
    task_fun (task_arg, GET_THREAD_NUM, item);
}
|
reconstruction.h | #pragma once
#include <cmath>
#include <vector>
#include <algorithm>
#include <iostream>
#include "response.h"
#include "scanner.h"
#include "2d/geometry/pixel_map.h"
#if USE_FAST_TEXT_PARSER
#include "util/text_parser.h"
#endif
#if _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
#include "reconstruction_stats.h"
#define BB_UPDATE 1
namespace PET2D {
namespace Strip {
/// 2D strip PET reconstruction
/// 2D strip-detector PET reconstruction driving list-mode MLEM iterations.
/// F is the floating-point type; Kernel evaluates the detector response.
template <typename FType, typename KernelType> class Reconstruction {
 public:
  using F = FType;
  using Kernel = KernelType;
  using Scanner = Strip::Scanner<FType, short>;
  using Pixel = typename Scanner::Pixel;
  using Point = typename Scanner::Point;
  using Vector = typename Scanner::Vector;
  using Output = PET2D::PixelMap<Pixel, F>;

  Scanner scanner;       // detector geometry
  Output rho;            // current image estimate
  Output sensitivity;    // per-pixel detection sensitivity

 private:
  const int n_threads;   // thread count (1 when OpenMP is unavailable)
  std::vector<F> acc_log;
  // per-thread accumulators, merged into rho after each iteration
  std::vector<std::vector<F>> thread_rhos;
  Kernel kernel;
  ReconstructionStats<size_t> stats_;

 public:
  const ReconstructionStats<size_t>& stats;
  std::vector<Response<F>> responses;  // list-mode events to reconstruct

  // Build a reconstruction for the given scanner and precompute the
  // sensitivity map from the pixel centers.
  Reconstruction(const Scanner& scanner)
      : scanner(scanner),
        rho(scanner.n_z_pixels, scanner.n_y_pixels, 1),
        sensitivity(scanner.n_z_pixels, scanner.n_y_pixels),
        n_threads(omp_get_max_threads()),
        thread_rhos(n_threads),
        kernel(scanner.sigma_z, scanner.sigma_dl),
        stats_(n_threads),
        stats(stats_) {
    for (int y = 0; y < scanner.n_y_pixels; ++y) {
      for (int z = 0; z < scanner.n_z_pixels; ++z) {
        sensitivity[y * scanner.n_z_pixels + z] =
            // scanner.pixel_sensitivity(Pixel(z, y));
            scanner.sensitivity(scanner.pixel_center(Pixel(z, y)));
      }
    }
  }

  // Convenience constructor building the scanner from its parameters.
  Reconstruction(F R_distance,
                 F scintilator_length,
                 int n_y_pixels,
                 int n_z_pixels,
                 F pixel_height,
                 F pixel_width,
                 F sigma_z,
                 F sigma_dl)
      : Reconstruction(Scanner(R_distance,
                               scintilator_length,
                               n_y_pixels,
                               n_z_pixels,
                               pixel_height,
                               pixel_width,
                               sigma_z,
                               sigma_dl)) {}

  // Convenience constructor for square images with square pixels.
  Reconstruction(F R_distance,
                 F scintilator_length,
                 int n_pixels,
                 F pixel_size,
                 F sigma_z,
                 F sigma_dl)
      : Reconstruction(R_distance,
                       scintilator_length,
                       n_pixels,
                       n_pixels,
                       pixel_size,
                       pixel_size,
                       sigma_z,
                       sigma_dl) {}

  /// Performs n_iterations of the list mode MLEM algorithm
  template <typename ProgressCallback>
  void operator()(ProgressCallback& progress,  ///< progress callback
                  int n_iterations,            ///< iterations to perform
                  int n_iterations_so_far = 0  ///< iterations so far
                  ) {
    stats_.fill();
    for (int iteration = 0; iteration < n_iterations; ++iteration) {
      progress(iteration + n_iterations_so_far);
      int n_responses = responses.size();
      // reset the per-thread accumulators
      for (auto& thread_rho : thread_rhos) {
        thread_rho.assign(scanner.total_n_pixels, 0);
      }
#if _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
      for (int e = 0; e < n_responses; ++e) {
        int thread = omp_get_thread_num();
        stats_.n_events_processed_by(thread, 1);
#if BB_UPDATE
        auto response = responses[e];
        F tan, y, z;
        response.calculate_tan_y_z(scanner.radius, tan, y, z);
        bb_update(Point(z, y), y, tan, thread_rhos[thread]);
#else
        // NOTE(review): dead branch while BB_UPDATE is defined to 1 above.
        F y = responses[e].z_u;
        F z = responses[e].z_d;
        simple_update(Point(z, y), y, z, thread_rhos[thread]);
#endif
      }
      // merge the per-thread accumulators into the image estimate
      rho.assign(0);
      for (int thread = 0; thread < n_threads; ++thread) {
        for (int i = 0; i < scanner.total_n_pixels; ++i) {
          rho[i] += thread_rhos[thread][i];
        }
      }
      progress(iteration + n_iterations_so_far, true);
    }
    stats_.collect();
  }

  // Reads whitespace-separated (z_u, z_d, dl) response triples until EOF.
  template <typename StreamType> Reconstruction& operator<<(StreamType& in) {
    int i = 0;
    for (;;) {
      F z_u, z_d, dl;
      in >> z_u >> z_d >> dl;
      if (in.eof())
        break;
      responses.emplace_back(z_u, z_d, dl);
      i++;
    }
    return *this;
  }

#if USE_FAST_TEXT_PARSER
  // Faster event loader: counts lines first to reserve, then parses.
  // With is_3d set, each line carries a leading (i, j) LOR pair to skip.
  void fast_load_txt_events(const char* fn, bool is_3d = false) {
    size_t n_lines = 0;
    // first just count lines and reserve space
    util::text_parser::read_lines(fn, [&](const char*) { ++n_lines; });
    responses.reserve(n_lines);
    // now read actual values
    util::text_parser::read_lines(
        fn,
        [&](const char* line) {
          util::text_parser parser(line);
          F z_u, z_d, dl;
          try {
            if (is_3d) {
              int i, j;
              parser >> i >> j  // just read LOR values even they are useless
                  >> z_u >> z_d >> dl;
            } else {
              parser >> z_u >> z_d >> dl;
            }
          } catch (const char* ex) {
            std::cerr << "error line: " << line << std::endl;
            throw(ex);
          }
          responses.emplace_back(z_u, z_d, dl);
        });
  }
#endif

  // Streams the current image estimate out.
  template <typename StreamType> StreamType& operator>>(StreamType& out) {
    return out << rho;
  }

  // Emits "x y value" triples for every pixel above a small threshold,
  // from either the sensitivity map or the image estimate.
  template <typename StreamType>
  void output_tuples(StreamType& out, bool output_sensitivity = false) {
    auto& output = output_sensitivity ? sensitivity : rho;
    for (int y = 0; y < scanner.n_y_pixels; ++y) {
      for (auto x = 0; x < scanner.n_z_pixels; ++x) {
        auto value = output[y * scanner.n_z_pixels + x];
        // skip near-zero pixels to keep the output sparse
        if (value >= (F)0.000000000001) {
          out << x << ' ' << y << ' ' << value << std::endl;
        }
      }
    }
  }

 private:
  // Number of whole pixels covering the given length (rounded to nearest).
  int n_pixels_in_line(F length, F pixel_size) const {
    return static_cast<int>(length / pixel_size + F(0.5));
  }

  // MLEM update for one event using a bounding box around the response
  // ellipse: accumulates kernel*rho over in-ellipse pixels, then distributes
  // the normalized weights into output_rho.
  void bb_update(Point center, F y, F tan, std::vector<F>& output_rho) {
    bool use_sensitivity = false;
    F sec, A, B, C, bb_y, bb_z;
    kernel.ellipse_bb(tan, sec, A, B, C, bb_y, bb_z);
    Pixel center_pixel = scanner.pixel_at(center);
    const int bb_half_width = n_pixels_in_line(bb_z, scanner.pixel_width);
    const int bb_half_height = n_pixels_in_line(bb_y, scanner.pixel_height);
    int thread = omp_get_thread_num();
    stats_.bb_width_sum_by(thread, 2 * bb_half_width);
    stats_.bb_height_sum_by(thread, 2 * bb_half_height);
    stats_.bb_width2_sum_by(thread, 4 * bb_half_width * bb_half_width);
    stats_.bb_height2_sum_by(thread, 4 * bb_half_height * bb_half_height);
    stats_.bb_width_height_sum_by(thread, 4 * bb_half_width * bb_half_height);
    Pixel top_left(center_pixel.x - bb_half_width,
                   center_pixel.y - bb_half_height);
    Pixel bottom_right(center_pixel.x + bb_half_width,
                       center_pixel.y + bb_half_height);
    Pixel scanner_top_left(0, 0);
    Pixel scanner_bottom_right(scanner.n_z_pixels - 1, scanner.n_y_pixels - 1);
    // check boundary conditions
    top_left.clamp(scanner_top_left, scanner_bottom_right);
    bottom_right.clamp(scanner_top_left, scanner_bottom_right);
    // NOTE(review): alloca of bb_size elements lives on the stack -- assumes
    // the bounding box stays small; confirm for large sigma values.
    const int bb_size = 4 * bb_half_width * bb_half_height;
    F* ellipse_kernel_mul_rho = (F*)alloca(bb_size * sizeof(F));
    Pixel* ellipse_pixels = (Pixel*)alloca(bb_size * sizeof(Pixel));
    int n_ellipse_pixels = 0;
    F denominator = 0;
    for (int iy = top_left.y; iy < bottom_right.y; ++iy) {
      for (int iz = top_left.x; iz < bottom_right.x; ++iz) {
#if DEBUG
        std::cout << iy << ' ' << iz << " ";
#endif
        stats_.n_pixels_processed_by();
        Pixel pixel(iz, iy);
        Point point = scanner.pixel_center(pixel);
        if (kernel.in_ellipse(A, B, C, center, point)) {
          Vector distance = point - center;
#if DEBUG
          std::cout << r.x << ' ' << r.y << " ";
#endif
          auto pixel_index = pixel.index(scanner.n_z_pixels);
          // use_sensitivity is hard-wired false above, so this is 1
          F pixel_sensitivity = use_sensitivity ? sensitivity[pixel_index] : 1;
          stats_.n_kernel_calls_by();
          F kernel_value = kernel(y, tan, sec, scanner.radius, distance);
#if DEBUG
          std::cout << kernel_value;
#endif
          F kernel_mul_rho = kernel_value * rho[pixel_index];
          denominator += kernel_mul_rho;  //* pixel_sensitivity;
          ellipse_pixels[n_ellipse_pixels] = pixel;
          ellipse_kernel_mul_rho[n_ellipse_pixels] =
              kernel_mul_rho / pixel_sensitivity;
          ++n_ellipse_pixels;
        }
#if DEBUG
        std::cout << "\n";
#endif
      }
    }
    // guard against events whose ellipse contains no contributing pixel
    F inv_denominator = (denominator > 0) ? 1 / denominator : 0;
    for (int p = 0; p < n_ellipse_pixels; ++p) {
      auto pixel = ellipse_pixels[p];
      auto pixel_kernel = ellipse_kernel_mul_rho[p];
      output_rho[pixel.index(scanner.n_z_pixels)] +=
          pixel_kernel * inv_denominator;
    }
  }

  // Simplified (rectangular window) update; only compiled into operator()
  // when BB_UPDATE is 0.
  void simple_update(Point ellipse_center,
                     F y,
                     F z,
                     std::vector<F>& output_rho) {
    Pixel center_pixel = scanner.pixel_at(ellipse_center);
    int y_line = 3 * scanner.sigma_z / scanner.pixel_width;
    int z_line = 3 * scanner.sigma_dl / scanner.pixel_height;
    const Pixel tl(center_pixel.x - z_line, center_pixel.y - y_line);
    const Pixel br(center_pixel.x + z_line, center_pixel.y + y_line);
    const int bb_size = 4 * z_line * y_line;
    F* ellipse_kernel_mul_rho = (F*)alloca(bb_size * sizeof(F));
    int n_ellipse_pixels = 0;
    F denominator = 0;
    for (int iy = tl.y; iy < br.y; ++iy) {
      for (int iz = tl.x; iz < br.x; ++iz) {
        stats_.n_pixels_processed_by();
        Pixel pixel(iz, iy);
        Point point = scanner.pixel_center(pixel);
        stats_.n_kernel_calls_by();
        F kernel_value =
            kernel.test(y, z, point, scanner.sigma_z, scanner.sigma_dl);
        F kernel_mul_rho = kernel_value * rho[pixel.index(scanner.n_z_pixels)];
        ellipse_kernel_mul_rho[n_ellipse_pixels++] = kernel_mul_rho;
      }
    }
    // NOTE(review): 'denominator' is never accumulated in the loop above, so
    // this divides by zero -- looks broken; it is dead code while
    // BB_UPDATE == 1, but confirm and fix before enabling this path.
    F inv_denominator = 1 / denominator;
    for (int iy = tl.y; iy < br.y; ++iy) {
      for (int iz = tl.x; iz < br.x; ++iz) {
        Pixel pixel(iz, iy);
        int ik = (pixel.y - tl.y) * z_line * 2 + pixel.x - tl.x;
        output_rho[pixel.index(scanner.n_z_pixels)] +=
            ellipse_kernel_mul_rho[ik] * inv_denominator;
      }
    }
  }
};
} // Strip
} // PET2D
|
FileParser.h | //
// Created by Timm Felden on 04.11.15.
//
#ifndef SKILL_CPP_COMMON_FILEPARSER_H_H
#define SKILL_CPP_COMMON_FILEPARSER_H_H
#include "../common.h"
#include "../api/SkillFile.h"
#include "ParseException.h"
#include "../streams/FileInputStream.h"
#include "StringPool.h"
#include "AbstractStoragePool.h"
#include "../restrictions/FieldRestriction.h"
#include "../restrictions/TypeRestriction.h"
#include "../fieldTypes/BuiltinFieldType.h"
#include "../fieldTypes/AnnotationType.h"
#include "LazyField.h"
#include <vector>
#include <unordered_map>
#include <string>
#include <iostream>
#include <cassert>
#if defined(_OPENMP)
#include <omp.h>
#endif
/**
* set to 1, to enable debug output; this should be disabled on all commits
*/
#define debugOnly if(0)
namespace skill {
using namespace streams;
using namespace fieldTypes;
using namespace restrictions;
namespace internal {
/**
* Turns a field type into a preliminary type information. In case of user types, the declaration
* of the respective user type may follow after the field declaration.
*/
inline const FieldType *parseFieldType(FileInputStream *in,
                                       const std::vector<AbstractStoragePool *> *types,
                                       StringPool *String,
                                       AnnotationType *Annotation,
                                       int blockCounter) {
    // Type codes: 0-4 typed constants (value read inline), 5 annotation,
    // 6-14 built-in scalar/string types, 15 and 17-20 compound types
    // (recursing for base types), >= 32 user types indexed into 'types'.
    const TypeID i = (TypeID) in->v64();
    switch (i) {
        case 0 :
            return new ConstantI8(in->i8());
        case 1 :
            return new ConstantI16(in->i16());
        case 2 :
            return new ConstantI32(in->i32());
        case 3 :
            return new ConstantI64(in->i64());
        case 4 :
            return new ConstantV64(in->v64());
        case 5 :
            return Annotation;
        case 6 :
            return &BoolType;
        case 7 :
            return &I8;
        case 8 :
            return &I16;
        case 9 :
            return &I32;
        case 10:
            return &I64;
        case 11:
            return &V64;
        case 12:
            return &F32;
        case 13:
            return &F64;
        case 14:
            return String;
        case 15: {
            // constant-length array: length followed by the base type
            int64_t length = in->v64();
            auto t = parseFieldType(in, types, String, Annotation, blockCounter);
            return new ConstantLengthArray(length, t);
        }
        // NOTE(review): no case 16 -- presumably unused by the serialization
        // format; confirm against the format specification.
        case 17:
            return new VariableLengthArray(parseFieldType(in, types, String, Annotation, blockCounter));
        case 18:
            return new ListType(parseFieldType(in, types, String, Annotation, blockCounter));
        case 19:
            return new SetType(parseFieldType(in, types, String, Annotation, blockCounter));
        case 20:
            // maps read key type then value type
            return new MapType(parseFieldType(in, types, String, Annotation, blockCounter),
                               parseFieldType(in, types, String, Annotation, blockCounter));
        default:
            // user types: IDs are offset by 32 into the pool vector
            if (i >= 32 && i - 32 < (TypeID) types->size())
                return types->at(i - 32);
            else
                throw ParseException(in, blockCounter,
                                     "Invalid type ID");
    }
}
/**
* create a new empty skill file; parametrized by specification dependent functionality.
*/
template<
        //!ensures that names of pools and known fields are known upfront, so that it is safe
        // to compare their names by pointer value
        StringPool *initializeStrings(FileInputStream *),
        //!create a new pool in the target type system
        AbstractStoragePool *newPool(TypeID typeID,
                                     String name,
                                     AbstractStoragePool *superPool,
                                     std::set<TypeRestriction *> *restrictions,
                                     const AbstractStringKeeper *const keeper),
        //! create a new state in the target type system
        SkillFile *makeState(FileInputStream *in,
                             WriteMode mode,
                             StringPool *String,
                             AnnotationType *Annotation,
                             std::vector<AbstractStoragePool *> *types,
                             api::typeByName_t *typesByName,
                             std::vector<std::unique_ptr<MappedInStream>> &dataList)
>
// Creates an empty skill file state at 'path' with no types or data; all
// containers are freshly allocated and handed to makeState, which assumes
// ownership.
SkillFile *newFile(const std::string &path, WriteMode mode) {
    // NOTE(review): the "w" argument looks like a write-mode flag for the
    // input stream -- confirm FileInputStream's constructor contract.
    FileInputStream *in = new FileInputStream(path, "w");
    StringPool *String = initializeStrings(in);
    std::vector<AbstractStoragePool *> *types =
            new std::vector<AbstractStoragePool *>;
    AnnotationType *Annotation = new AnnotationType(types);
    api::typeByName_t *typesByName = new api::typeByName_t;
    std::vector<std::unique_ptr<MappedInStream>> dataList;
    return makeState(in, mode, String,
                     Annotation, types,
                     typesByName,
                     dataList);
}
/**
* parses a skill file; parametrized by specification dependent functionality.
*/
template<
        //!ensures that names of pools and known fields are known upfront, so that it is safe
        // to compare their names by pointer value
        StringPool *initializeStrings(FileInputStream *),
        //!create a new pool in the target type system
        AbstractStoragePool *newPool(TypeID typeID,
                                     String name,
                                     AbstractStoragePool *superPool,
                                     std::set<TypeRestriction *> *restrictions,
                                     const AbstractStringKeeper *const keeper ),
        //! create a new state in the target type system
        SkillFile *makeState(FileInputStream *in,
                             WriteMode mode,
                             StringPool *String,
                             AnnotationType *Annotation,
                             std::vector<AbstractStoragePool *> *types,
                             api::typeByName_t *typesByName,
                             std::vector<std::unique_ptr<MappedInStream>> &dataList)
>
// Parses a skill binary file block by block (string block, then type block,
// then field data) and hands the assembled state to makeState, which takes
// ownership of all released resources.
SkillFile *parseFile(std::unique_ptr<FileInputStream> in, WriteMode mode) {
    // pool plus the number of local fields announced for it in this block
    struct LFEntry {
        LFEntry(AbstractStoragePool *const pool, SKilLID count)
                : pool(pool), count(count) {}

        AbstractStoragePool *const pool;
        const SKilLID count;
    };

    // PARSE STATE
    std::unique_ptr<StringPool> String(initializeStrings(in.get()));
    std::vector<AbstractStoragePool *> *types =
            new std::vector<AbstractStoragePool *>;
    std::unique_ptr<AnnotationType> Annotation(new AnnotationType(types));
    std::unique_ptr<api::typeByName_t> typesByName(new api::typeByName_t);
    std::vector<std::unique_ptr<MappedInStream>> dataList;

    // process stream
    debugOnly {
        std::cout << std::endl << "file " << in->getPath() << std::endl;
    }
    for (int blockCounter = 0; !in->eof(); blockCounter++) {
        debugOnly {
            std::cout << "block " << blockCounter << " starting at " << in->getPosition() << std::endl;
        }

        // string block: a count followed by end offsets; positions are
        // recorded lazily, the actual characters are skipped over
        try {
            const int count = (int) in->v64();
            debugOnly {
                std::cout << count << " strings" << std::endl;
            }

            if (0 != count) {
                int last = 0, offset = 0;
                const long position = in->getPosition() + 4 * count;
                for (int i = count; i != 0; i--) {
                    offset = in->i32();
                    String->addPosition(std::pair<long, int>(position + last, offset - last));
                    last = offset;
                }
                in->jump(in->getPosition() + last);
            }
        } catch (SkillException &e) {
            // FIX: catch by reference (was by value, slicing ParseException)
            throw ParseException(in, blockCounter, "corrupted string block");
        }
        debugOnly {
            std::cout << "string block ended at " << in->getPosition() << std::endl;
        }

        // type block
        try {
            TypeID typeCount = (TypeID) in->v64();

            // this barrier is strictly increasing inside of each block and reset to 0 at the beginning of each block
            TypeID blockIDBarrier = 0;

            std::set<api::String> seenTypes;

            // number of fields to expect for that type in this block
            std::vector<LFEntry> localFields;

            // parse type definitions
            while (typeCount-- > 0) {
                api::String name = String->get((SKilLID) in->v64());

                // check null name
                if (nullptr == name)
                    throw ParseException(in, blockCounter,
                                         "Corrupted file, nullptr in typename");

                debugOnly {
                    std::cout << "processing type " << *name << " at " << in->getPosition()
                              << std::endl;
                }

                // check duplicate types
                if (seenTypes.find(name) != seenTypes.end())
                    throw ParseException(
                            in, blockCounter,
                            std::string("Duplicate definition of type ").append(*name));
                seenTypes.insert(name);

                const int count = (int) in->v64();

                auto defIter = typesByName->find(name);
                if (defIter == typesByName->end()) {
                    // unknown type

                    // type restrictions
                    int restrictionCount = (int) in->v64();
                    auto rest = std::unique_ptr<std::set<TypeRestriction *>>(new std::set<TypeRestriction *>);
                    //! TODO restrictions
                    // rest.sizeHint(restrictionCount)
                    while (restrictionCount-- > 0) {
                        switch ((char) in->v64()) {
                            case 0: //restrictions.Unique
                                break;
                            case 1: // restrictions.Singleton
                                break;
                            case 2: // restrictions.Monotone
                                break;
                            case 3: // restrictions.Abstract
                                break;
                            case 5:
                                in->v64(); // restrictions.DefaultTypeRestriction(in.v64.toInt)
                                break;
                            default:
                                // FIX: the exception was constructed but never
                                // thrown, silently swallowing the error
                                throw ParseException(
                                        in, blockCounter,
                                        "Found an unknown type restriction. Please regenerate your binding, if possible.");
                        }
                        // TODO rest +=
                    }

                    // super
                    const TypeID superID = (TypeID) in->v64();
                    AbstractStoragePool *superPool;
                    if (0 == superID)
                        superPool = nullptr;
                    else if (superID > (TypeID) types->size()) {
                        throw ParseException(
                                in, blockCounter,
                                std::string("Type ").append(*name).append(
                                        " refers to an ill-formed super type."));
                    } else {
                        superPool = types->at(superID - 1);
                        assert(superPool);
                    }

                    // allocate pool
                    AbstractStoragePool *r = newPool(
                            (TypeID) types->size() + 32, name, superPool, rest.get(), String->keeper);
                    rest.release();
                    types->push_back(r);
                    defIter = typesByName->insert(
                            std::pair<api::String, AbstractStoragePool *>(name, r)).first;
                }
                AbstractStoragePool *const definition = defIter->second;

                // type blocks must list types in strictly increasing ID order
                if (blockIDBarrier < definition->typeID)
                    blockIDBarrier = definition->typeID;
                else
                    throw ParseException(in, blockCounter, "Found unordered type block.");

                // in contrast to prior implementation, bpo is the position inside of data, even if there are no actual
                // instances. We need this behavior, because that way we can cheaply calculate the number of static instances
                const SKilLID lbpo =
                        definition->basePool->cachedSize + (nullptr == definition->superPool ? 0 : (
                                0 != count ? (SKilLID) in->v64() :
                                definition->superPool->blocks.back().bpo));

                // ensure that bpo is in fact inside of the parents block
                if (definition->superPool) {
                    const auto &b = definition->superPool->blocks.back();
                    if (lbpo < b.bpo || b.bpo + b.dynamicCount < lbpo)
                        throw ParseException(in, blockCounter,
                                             "Found broken bpo.");
                }

                // static count and cached size are updated in the resize phase
                // @note we assume that all dynamic instance are static instances as well, until we know for sure
                definition->blocks.push_back(Block(blockCounter, lbpo, count, count));
                definition->staticDataInstances += count;

                localFields.push_back(LFEntry(definition, (SKilLID) in->v64()));
            }

            // resize pools, i.e. update cachedSize and staticCount
            for (auto &e : localFields) {
                const auto p = e.pool;
                const auto &b = p->blocks.back();
                p->cachedSize += b.dynamicCount;

                if (0 != b.dynamicCount) {
                    // calculate static count of our parent
                    const auto &parent = p->superPool;
                    if (parent) {
                        auto &sb = parent->blocks.back();
                        // assumed static instances, minus what static instances would be, if p were the first sub pool.
                        const auto delta = sb.staticCount - (b.bpo - sb.bpo);
                        // if positive, then we have to subtract it from the assumed static count (local and global)
                        if (delta > 0) {
                            sb.staticCount -= delta;
                            parent->staticDataInstances -= delta;
                        }
                    }
                }
            }

            // track offset information, so that we can create the block maps and jump to the next block directly after
            // parsing field information
            long dataEnd = 0L;

            // parse fields
            for (const auto &e : localFields) {
                const auto &p = e.pool;

                // new fields must have IDs immediately above existing ones
                TypeID legalFieldIDBarrier = 1 + (TypeID) p->dataFields.size();

                const auto &block = p->blocks.back();

                auto localFieldCount = e.count;
                while (localFieldCount-- > 0) {
                    const TypeID id = (TypeID) in->v64();
                    if (id <= 0 || legalFieldIDBarrier < id)
                        throw ParseException(in, blockCounter,
                                             "Found an illegal field ID.");

                    long endOffset = 0;
                    if (id == legalFieldIDBarrier) {
                        // new field
                        legalFieldIDBarrier++;
                        const api::String fieldName = String->get((SKilLID) in->v64());
                        if (!fieldName)
                            throw ParseException(in, blockCounter,
                                                 "A field has a nullptr as name.");

                        debugOnly {
                            std::cout << "processing new field " << *p->name << "." << *fieldName
                                      << " at " << in->getPosition() << std::endl;
                        }

                        const auto t = parseFieldType(in.get(), types, String.get(), Annotation.get(),
                                                      blockCounter);

                        // parse field restrictions
                        std::set<const restrictions::FieldRestriction *> rest;
                        int fieldRestrictionCount = (int) in->v64();
                        for (; fieldRestrictionCount != 0; fieldRestrictionCount--) {
                            const int i = (const int) in->v64();
                            switch (i) {
                                case 0: {// nonnull
                                    rest.insert(restrictions::NonNull::get());
                                    break;
                                }
                                case 1: {// default
                                    // a default value of the field's type follows
                                    if (5 == t->typeID || 32 <= t->typeID)
                                        in->v64();
                                    else
                                        t->read(*in);
                                    break;
                                }
                                case 3: {
                                    //range
                                    switch (t->typeID) {
                                        case 7:
                                            rest.insert(new restrictions::Range<int8_t>(in->i8(), in->i8()));
                                            break;
                                        case 8:
                                            rest.insert(new restrictions::Range<int16_t>(in->i16(), in->i16()));
                                            break;
                                        case 9:
                                            rest.insert(new restrictions::Range<int32_t>(in->i32(), in->i32()));
                                            break;
                                        case 10:
                                            rest.insert(new restrictions::Range<int64_t>(in->i64(), in->i64()));
                                            break;
                                        case 11:
                                            rest.insert(new restrictions::Range<int64_t>(in->v64(), in->v64()));
                                            break;
                                        case 12:
                                            rest.insert(new restrictions::Range<float>(in->f32(), in->f32()));
                                            break;
                                        case 13:
                                            rest.insert(new restrictions::Range<double>(in->f64(), in->f64()));
                                            break;
                                        default:
                                            throw ParseException(
                                                    in, blockCounter,
                                                    "Range restricton on a type that can not be restricted.");
                                    }
                                    break;
                                }
                                case 5: { // coding
                                    String->get((SKilLID) in->v64());
                                    break;
                                }
                                case 7: {
                                    // constant length pointer
                                    break;
                                }
                                case 9: {
                                    // oneof
                                    // read array of type IDs
                                    for (int c = in->v64(); c != 0; c--)
                                        in->v64();
                                    break;
                                }
                                default:
                                    throw ParseException(
                                            in, blockCounter,
                                            "Found an unknown field restriction. Please regenerate your binding, if possible.");
                            }
                        }
                        endOffset = in->v64();

                        auto f = p->addField(String->keeper, id, t, fieldName);
                        for (auto r : rest)
                            f->addRestriction(r);

                        f->addChunk(
                                new BulkChunk(dataEnd, endOffset, p->cachedSize, p->blocks.size()));
                    } else {
                        // known field
                        endOffset = in->v64();
                        p->dataFields[id - 1]->addChunk(
                                new SimpleChunk(dataEnd, endOffset, block.dynamicCount, block.bpo));
                    }

                    dataEnd = endOffset;
                }
            }
            debugOnly {
                std::cout << "reached end of type header at " << in->getPosition() << std::endl;
            }

            // jump over data and continue in the next block
            dataList.push_back(std::unique_ptr<MappedInStream>(in->jumpAndMap(dataEnd)));
        } catch (SkillException &) {
            // FIX: catch by reference and rethrow the original exception;
            // 'catch (SkillException e) { throw e; }' sliced ParseException
            // down to its base class
            throw;
        } catch (...) {
            throw ParseException(in, blockCounter, "unexpected foreign exception");
        }
    }

    // note there still isn't a single instance
    return makeState(in.release(), mode, String.release(), Annotation.release(), types,
                     typesByName.release(),
                     dataList);
}
/**
* has to be called by make state after instances have been allocated to ensure
* that required fields are read from file
*/
// Reads all field data chunks concurrently after instance allocation; errors
// from worker iterations are collected into 'results' and re-thrown as a
// single SkillException at the end.
// NOTE(review): uses std::stringstream but this header does not include
// <sstream> directly -- presumably pulled in transitively; confirm.
// NOTE(review): the inner 'parallel for' only gets its own team if nested
// parallelism is enabled in the OpenMP runtime -- confirm intent.
inline void triggerFieldDeserialization(std::vector<AbstractStoragePool *> *types,
                                        std::vector<std::unique_ptr<MappedInStream>> &dataList) {
    // error messages gathered from worker threads (guarded by omp critical)
    std::vector<std::string *> results;

#pragma omp parallel for schedule(dynamic) num_threads(omp_get_max_threads()/2)
    for (size_t i = 0; i < types->size(); i++) {
        auto t = types->at(i);
#pragma omp parallel for schedule(dynamic) num_threads(2)
        for (size_t j = 0; j < t->dataFields.size(); j++) {
            auto f = t->dataFields[j];

            // walk blocks in step with the field's chunks
            int bsIndex = 0;
            for (Chunk *dc : f->dataChunks) {
                if (dynamic_cast<BulkChunk *>(dc)) {
                    // skip blocks that do not contain data for our field
                    bsIndex += ((BulkChunk *) dc)->blockCount - 1;
                }
                const int blockIndex = t->blocks[bsIndex++].blockIndex;

                if (dc->count) {
                    MappedInStream *part = dataList[blockIndex].get();
                    skill::streams::MappedInStream in(part, dc->begin, dc->end);

                    try {
                        if (auto c = dynamic_cast<const ::skill::internal::SimpleChunk *>(dc)) {
                            // NOTE(review): this 'i' shadows the outer loop
                            // counter -- intentional but easy to misread
                            int i = c->bpo + 1;
                            f->rsc(i, i + c->count, &in);
                        } else {
                            auto bc = dynamic_cast<const ::skill::internal::BulkChunk *>(dc);
                            f->rbc(&in, bc);
                        }

                        // lazy fields legitimately leave bytes unconsumed
                        if (!(in.eof() || nullptr != dynamic_cast<::skill::internal::LazyField *>(f))) {
#pragma omp critical
                            {
                                std::stringstream message;
                                message << "ParseException while parsing field.\n Position"
                                        << in.getPosition()
                                        << "\n reason: Did not consume all bytes." << std::endl;
                                results.push_back(new std::string(message.str()));
                            }
                        };
                    } catch (SkillException e) {
#pragma omp critical
                        {
                            std::stringstream message;
                            message << "ParseException while parsing field.\n Position"
                                    << in.getPosition()
                                    << "\n reason: "
                                    << e.message << std::endl;
                            results.push_back(new std::string(message.str()));
                        }
                    } catch (...) {
#pragma omp critical
                        {
                            results.push_back(new std::string("unknown error in concurrent read"));
                        }
                    }
                }
            }
        }
    }

    // check for errors
    if (results.size()) {
        std::stringstream msg;
        for (const auto s : results) {
            if (s) {
                msg << *s << std::endl;
                delete s;
            }
        }
        throw SkillException(msg.str());
    }
}
}
}
#undef debugOnly
#endif //SKILL_CPP_COMMON_FILEPARSER_H_H
|
canonicalForLoops.c | #ifdef _CIVL
#include <civlc.cvh>
#endif
#include <omp.h>
#include <stdio.h>
int main (int argc, char * argv[]){
    /* CIVL verification harness: fills a[], b[], c[] with 0,1,2 using three
     * canonical OpenMP for-loop forms (variable bounds/steps, decreasing
     * loops); the exact loop shapes are the test subject, and $assert (a CIVL
     * intrinsic, not standard C) proves all three produce the same result. */
    double a[3];
    double b[3];
    double c[3];
    /* Non-constant bounds/steps to exercise canonical-form recognition. */
    int zero = 0;
    int three = 3;
    int one = 1;
    int i;
    /* Ascending loop with variable bound and additive step. */
#pragma omp parallel
    {
#pragma omp for
        for(i=zero; three > i; i+=one)
            a[i] = i;
    }
    /* Descending loop with explicit 'j = j - 1' decrement. */
#pragma omp parallel
    {
#pragma omp for
        for(int j=three; j > zero; j = j - 1){
            b[three - j] = three - j;
        }
    }
    /* Descending loop with 'j--' and a >= bound. */
#pragma omp parallel
    {
#pragma omp for
        for(int j=three; j >= one; j--){
            c[three - j] = three - j;
        }
    }
    //Properties checking
    for(int j = 0; j<three; j++){
        $assert(a[j] == b[j]);
        $assert(b[j] == j);
        $assert(c[j] == b[j]);
    }
    return 0;
}
|
HYPRE_IJMatrix.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* HYPRE_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixCreate
*--------------------------------------------------------------------------*/
/* Creates an IJ matrix object for the local row range [ilower, iupper] and
 * column range [jlower, jupper].  Collective over comm: every rank must call
 * it (two hypre_MPI_Bcast calls below).  An empty local range is expressed as
 * ilower == iupper + 1.  On success *matrix holds the new handle. */
HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm comm,
HYPRE_BigInt ilower,
HYPRE_BigInt iupper,
HYPRE_BigInt jlower,
HYPRE_BigInt jupper,
HYPRE_IJMatrix *matrix )
{
HYPRE_BigInt info[2];
HYPRE_Int num_procs;
HYPRE_Int myid;
hypre_IJMatrix *ijmatrix;
HYPRE_BigInt row0, col0, rowN, colN;
/* CTAlloc zero-fills; fields that must be non-zero are set explicitly. */
ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
hypre_IJMatrixComm(ijmatrix) = comm;
hypre_IJMatrixObject(ijmatrix) = NULL;
hypre_IJMatrixTranslator(ijmatrix) = NULL;
hypre_IJMatrixAssumedPart(ijmatrix) = NULL;
hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED;
hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
hypre_IJMatrixPrintLevel(ijmatrix) = 0;
hypre_IJMatrixOMPFlag(ijmatrix) = 0;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &myid);
/* Argument validation: free the partially built object on any failure
 * so the error paths do not leak. */
if (ilower > iupper + 1 || ilower < 0)
{
hypre_error_in_arg(2);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
if (iupper < -1)
{
hypre_error_in_arg(3);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
if (jlower > jupper + 1 || jlower < 0)
{
hypre_error_in_arg(4);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
if (jupper < -1)
{
hypre_error_in_arg(5);
hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Partitionings are stored half-open: [lower, upper + 1). */
hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower;
hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper + 1;
hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower;
hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper + 1;
/* now we need the global number of rows and columns as well
as the global first row and column index */
/* proc 0 has the first row and col */
if (myid == 0)
{
info[0] = ilower;
info[1] = jlower;
}
hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);
row0 = info[0];
col0 = info[1];
/* proc (num_procs-1) has the last row and col */
if (myid == (num_procs - 1))
{
info[0] = iupper;
info[1] = jupper;
}
hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
rowN = info[0];
colN = info[1];
hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1;
hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1;
*matrix = (HYPRE_IJMatrix) ijmatrix;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroys an IJ matrix: releases the assumed partition, the underlying
 * ParCSR object (if any), and the IJ wrapper itself.
 * Note: an unrecognized object type (other than -1) is reported as an
 * argument error and, as before, the wrapper is NOT freed on that path. */
HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Fix: parenthesize the condition explicitly (the original
    * "if hypre_IJMatrixAssumedPart(ijmatrix)" compiled only because the
    * macro expansion happens to supply parentheses), and drop the
    * redundant "if (ijmatrix)" that duplicated the NULL guard above. */
   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixDestroyParCSR(ijmatrix);
   }
   else if (hypre_IJMatrixObjectType(ijmatrix) != -1)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Prepares the matrix for coefficient insertion.  Only the ParCSR object
 * type is supported; anything else is reported as an argument error. */
HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR(ij);
   return hypre_error_flag;
}
/* Same as HYPRE_IJMatrixInitialize, but lets the caller pick the memory
 * location (host/device) for the underlying storage. */
HYPRE_Int
HYPRE_IJMatrixInitialize_v2( HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR_v2(ij, memory_location);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Sets the diagnostic print level of the matrix. */
HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
                             HYPRE_Int print_level )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Fix: store the requested level.  The original ignored the
    * print_level argument entirely and hard-coded the level to 1. */
   hypre_IJMatrixPrintLevel(ijmatrix) = print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_PrefixSumInt: exclusive prefix sum of integer values.
 *
 * On return, sums[j] = vals[0] + ... + vals[j-1] (sums[0] = 0).
 * The output array is write-only; it need not be initialized by the caller.
 *
 * The current implementation is okay for modest numbers of threads: the
 * input is split into nthreads blocks, scanned independently, the block
 * offsets are fixed up serially, then applied in parallel.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PrefixSumInt(HYPRE_Int nvals,
                   HYPRE_Int *vals,
                   HYPRE_Int *sums)
{
   HYPRE_Int j, nthreads, bsize;

   /* Robustness fix: avoid writing sums[0] when there is nothing to scan. */
   if (nvals < 1)
   {
      return hypre_error_flag;
   }

   nthreads = hypre_NumThreads();
   bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */

   if (nvals < nthreads || bsize == 1)
   {
      /* Serial scan for small inputs. */
      sums[0] = 0;
      for (j = 1; j < nvals; j++)
      {
         /* Fix: plain assignment.  The original used "+=", which read the
          * uninitialized output entry sums[j] (undefined behavior) and
          * produced garbage whenever sums[] was not pre-zeroed. */
         sums[j] = sums[j - 1] + vals[j - 1];
      }
   }
   else
   {
      /* Compute preliminary partial sums (in parallel) within each interval */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j + bsize), nvals);
         sums[j] = 0;
         for (i = j + 1; i < n; i++)
         {
            sums[i] = sums[i - 1] + vals[i - 1];
         }
      }

      /* Compute final partial sums (in serial) for the first entry of every interval */
      for (j = bsize; j < nvals; j += bsize)
      {
         sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1];
      }

      /* Compute final partial sums (in parallel) for the remaining entries */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = bsize; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j + bsize), nvals);
         for (i = j + 1; i < n; i++)
         {
            sums[i] += sums[j];
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Sets (overwrites) coefficient values.  Thin validation wrapper that
 * forwards to HYPRE_IJMatrixSetValues2 with row_indexes == NULL.
 * ncols may be NULL, in which case one entry per row is assumed. */
HYPRE_Int
HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         const HYPRE_BigInt *rows,
                         const HYPRE_BigInt *cols,
                         const HYPRE_Complex *values )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;   /* nothing to do */
   }
   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* ncols == NULL is deliberately allowed (see SetValues2). */
   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }
   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Sets (overwrites) coefficient values with explicit per-row offsets.
 * row_indexes[i] is the offset of row i's data in cols[]/values[]; if NULL,
 * offsets are derived as the prefix sum of ncols.  If ncols is NULL, one
 * entry per row is assumed. */
HYPRE_Int
HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (nrows == 0)
{
return hypre_error_flag;
}
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (nrows < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/* ncols == NULL is allowed: a default of 1 entry per row is built below. */
/*
if (!ncols)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
*/
if (!rows)
{
hypre_error_in_arg(4);
return hypre_error_flag;
}
if (!cols)
{
hypre_error_in_arg(6);
return hypre_error_flag;
}
if (!values)
{
hypre_error_in_arg(7);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* On device builds, dispatch to the device path; the "set" tag selects
 * overwrite semantics in the shared set/add kernel. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values,
"set");
}
else
#endif
{
/* Host path: materialize defaults for the optional arguments.  The
 * const cast is safe because row_indexes_tmp is only written when it
 * points at our own freshly allocated buffer. */
HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
HYPRE_Int *ncols_tmp = ncols;
if (!ncols_tmp)
{
HYPRE_Int i;
ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows; i++)
{
ncols_tmp[i] = 1;
}
}
if (!row_indexes)
{
/* Offsets default to the exclusive prefix sum of the row lengths. */
row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
}
if (hypre_IJMatrixOMPFlag(ijmatrix))
{
hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
else
{
hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
/* Free only the buffers this function allocated. */
if (!ncols)
{
hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
}
if (!row_indexes)
{
hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Sets every stored coefficient of the matrix to the given value. */
HYPRE_Int
HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value)
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetConstantValuesParCSR(ij, value);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Adds to (accumulates into) coefficient values.  Thin validation wrapper
 * that forwards to HYPRE_IJMatrixAddToValues2 with row_indexes == NULL.
 * ncols may be NULL, in which case one entry per row is assumed. */
HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix,
                           HYPRE_Int nrows,
                           HYPRE_Int *ncols,
                           const HYPRE_BigInt *rows,
                           const HYPRE_BigInt *cols,
                           const HYPRE_Complex *values )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;   /* nothing to do */
   }
   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   /* ncols == NULL is deliberately allowed (see AddToValues2). */
   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }
   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Adds to coefficient values with explicit per-row offsets; the accumulate
 * counterpart of HYPRE_IJMatrixSetValues2 (same argument conventions). */
HYPRE_Int
HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
if (nrows == 0)
{
return hypre_error_flag;
}
if (!ijmatrix)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (nrows < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/* ncols == NULL is allowed: a default of 1 entry per row is built below. */
/*
if (!ncols)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
*/
if (!rows)
{
hypre_error_in_arg(4);
return hypre_error_flag;
}
if (!cols)
{
hypre_error_in_arg(6);
return hypre_error_flag;
}
if (!values)
{
hypre_error_in_arg(7);
return hypre_error_flag;
}
if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* On device builds, dispatch to the device path; the "add" tag selects
 * accumulate semantics in the shared set/add kernel. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values,
"add");
}
else
#endif
{
/* Host path: materialize defaults for the optional arguments.  The
 * const cast is safe because row_indexes_tmp is only written when it
 * points at our own freshly allocated buffer. */
HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
HYPRE_Int *ncols_tmp = ncols;
if (!ncols_tmp)
{
HYPRE_Int i;
ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows; i++)
{
ncols_tmp[i] = 1;
}
}
if (!row_indexes)
{
/* Offsets default to the exclusive prefix sum of the row lengths. */
row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
}
if (hypre_IJMatrixOMPFlag(ijmatrix))
{
hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
else
{
hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
}
/* Free only the buffers this function allocated. */
if (!ncols)
{
hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
}
if (!row_indexes)
{
hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Finalizes the matrix after all Set/AddTo calls; dispatches to the device
 * assembly routine on device builds, otherwise to the host ParCSR path. */
HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      return hypre_IJMatrixAssembleParCSRDevice(ij);
   }
#endif

   return hypre_IJMatrixAssembleParCSR(ij);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Retrieves, for each requested row, the number of stored entries. */
HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int nrows,
                            HYPRE_BigInt *rows,
                            HYPRE_Int *ncols )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;   /* nothing to do */
   }
   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   if (!rows)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   if (!ncols)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetRowCountsParCSR(ij, nrows, rows, ncols);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Retrieves coefficient values for the given rows/columns.  Unlike the
 * Set/AddTo wrappers, ncols is required here. */
HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         HYPRE_BigInt *rows,
                         HYPRE_BigInt *cols,
                         HYPRE_Complex *values )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;   /* nothing to do */
   }
   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }
   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetValuesParCSR(ij, nrows, ncols, rows, cols, values);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Records the underlying storage type (e.g. HYPRE_PARCSR) to be created
 * by a later Initialize call. */
HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int type )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ij) = type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Reports the underlying storage type through *type. */
HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int *type )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ij);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Reports this rank's row and column ranges as inclusive bounds.  The
 * partitionings are stored half-open, hence the "- 1" on the uppers. */
HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix,
                             HYPRE_BigInt *ilower,
                             HYPRE_BigInt *iupper,
                             HYPRE_BigInt *jlower,
                             HYPRE_BigInt *jupper )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *ilower = hypre_IJMatrixRowPartitioning(ij)[0];
   *iupper = hypre_IJMatrixRowPartitioning(ij)[1] - 1;
   *jlower = hypre_IJMatrixColPartitioning(ij)[0];
   *jupper = hypre_IJMatrixColPartitioning(ij)[1] - 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
Assumes that the implementation has an underlying matrix, so it would not
work with a direct implementation of IJMatrix.
@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/
HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix,
                         void **object )
{
   /* Expose the underlying storage object (e.g. a ParCSR matrix) through
    * the opaque pointer *object; ownership stays with the IJ matrix. */
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *object = hypre_IJMatrixObject(ij);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Provides per-row size hints so storage can be preallocated. */
HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix,
                           const HYPRE_Int *sizes )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetRowSizesParCSR(ij, sizes);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Provides per-row size hints split into diagonal-block and off-diagonal
 * entries, for tighter ParCSR preallocation. */
HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixSetDiagOffdSizesParCSR(ij, diag_sizes, offdiag_sizes);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Hints the expected number of elements destined for other ranks, so the
 * off-processor communication buffers can be sized up front. */
HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int max_off_proc_elmts)
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetMaxOffProcElmtsParCSR(ij, max_off_proc_elmts);
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixRead
* create IJMatrix on host memory
*--------------------------------------------------------------------------*/
/* Reads an IJ matrix from "<filename>.<myid>" (one file per rank) into a
 * freshly created, host-resident IJ matrix of the given object type.
 * "%b" is hypre's scanf conversion for HYPRE_BigInt.  Entries whose row is
 * outside this rank's range are routed through AddToValues (off-processor
 * accumulation); local entries use SetValues. */
HYPRE_Int
HYPRE_IJMatrixRead( const char     *filename,
                    MPI_Comm        comm,
                    HYPRE_Int       type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix matrix;
   HYPRE_BigInt   ilower, iupper, jlower, jupper;
   HYPRE_BigInt   I, J;
   HYPRE_Int      ncols;
   HYPRE_Complex  value;
   HYPRE_Int      myid, ret;
   char           new_filename[255];
   FILE          *file;

   hypre_MPI_Comm_rank(comm, &myid);
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* First line: this rank's row/column ranges. */
   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);
   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);
   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         /* Fix: close the file before bailing out; the original returned
          * here without fclose and leaked the stream. */
         fclose(file);
         return hypre_error_flag;
      }
      if (I < ilower || I > iupper)
      {
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);
   fclose(file);

   *matrix_ptr = matrix;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixPrint
*--------------------------------------------------------------------------*/
/* Prints the underlying ParCSR matrix in IJ format.  If the matrix lives
 * on the device, it is cloned to the host first and the clone is freed
 * after printing. */
HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix  matrix,
                     const char     *filename )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   void *object;
   HYPRE_IJMatrixGetObject(matrix, &object);
   HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object;

   HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix);

   if (hypre_GetActualMemLocation(memory_location) != hypre_MEMORY_HOST)
   {
      /* Device-resident: print a temporary host copy. */
      HYPRE_ParCSRMatrix host_copy = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixPrintIJ(host_copy, 0, 0, filename);
      hypre_ParCSRMatrixDestroy(host_copy);
   }
   else
   {
      hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixSetOMPFlag
*--------------------------------------------------------------------------*/
/* Enables/disables the OpenMP Set/AddTo code paths for this matrix. */
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int omp_flag )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixOMPFlag(ij) = omp_flag;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixTranspose
*--------------------------------------------------------------------------*/
/* Builds a new, already-assembled IJ matrix holding the transpose of A.
 * The result's row/col metadata are A's with rows and columns swapped. */
HYPRE_Int
HYPRE_IJMatrixTranspose( HYPRE_IJMatrix  matrix_A,
                         HYPRE_IJMatrix *matrix_AT )
{
   hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A;
   hypre_IJMatrix *ij_T;
   HYPRE_Int       d;

   if (!ij_A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   ij_T = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ij_T)         = hypre_IJMatrixComm(ij_A);
   hypre_IJMatrixObject(ij_T)       = NULL;
   hypre_IJMatrixTranslator(ij_T)   = NULL;
   hypre_IJMatrixAssumedPart(ij_T)  = NULL;
   hypre_IJMatrixObjectType(ij_T)   = hypre_IJMatrixObjectType(ij_A);
   hypre_IJMatrixAssembleFlag(ij_T) = 1;
   hypre_IJMatrixPrintLevel(ij_T)   = hypre_IJMatrixPrintLevel(ij_A);

   /* Swap row/column metadata for the transpose. */
   hypre_IJMatrixGlobalFirstRow(ij_T) = hypre_IJMatrixGlobalFirstCol(ij_A);
   hypre_IJMatrixGlobalFirstCol(ij_T) = hypre_IJMatrixGlobalFirstRow(ij_A);
   hypre_IJMatrixGlobalNumRows(ij_T)  = hypre_IJMatrixGlobalNumCols(ij_A);
   hypre_IJMatrixGlobalNumCols(ij_T)  = hypre_IJMatrixGlobalNumRows(ij_A);
   for (d = 0; d < 2; d++)
   {
      hypre_IJMatrixRowPartitioning(ij_T)[d] = hypre_IJMatrixColPartitioning(ij_A)[d];
      hypre_IJMatrixColPartitioning(ij_T)[d] = hypre_IJMatrixRowPartitioning(ij_A)[d];
   }

   if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
   {
      hypre_IJMatrixTransposeParCSR(ij_A, ij_T);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   *matrix_AT = (HYPRE_IJMatrix) ij_T;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixNorm
*
* TODO: Add other norms
*--------------------------------------------------------------------------*/
/* Computes a norm of the matrix into *norm (see the ParCSR routine for
 * which norm is produced). */
HYPRE_Int
HYPRE_IJMatrixNorm( HYPRE_IJMatrix  matrix,
                    HYPRE_Real     *norm )
{
   hypre_IJMatrix *ij = (hypre_IJMatrix *) matrix;

   if (!ij)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ij) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixNormParCSR(ij, norm);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixAdd
*--------------------------------------------------------------------------*/
/* Computes C = alpha*A + beta*B into a new, already-assembled IJ matrix.
 * A and B must share identical row and column partitionings. */
HYPRE_Int
HYPRE_IJMatrixAdd( HYPRE_Complex    alpha,
                   HYPRE_IJMatrix   matrix_A,
                   HYPRE_Complex    beta,
                   HYPRE_IJMatrix   matrix_B,
                   HYPRE_IJMatrix  *matrix_C )
{
   hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A;
   hypre_IJMatrix *ij_B = (hypre_IJMatrix *) matrix_B;
   hypre_IJMatrix *ij_C;

   HYPRE_BigInt   *row_partitioning_A;
   HYPRE_BigInt   *col_partitioning_A;
   HYPRE_BigInt   *row_partitioning_B;
   HYPRE_BigInt   *col_partitioning_B;
   HYPRE_Int       i;

   if (!ij_A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* Fix: the original dereferenced ij_B's partitionings without checking
    * it for NULL (only ij_A was checked), crashing on a NULL matrix_B. */
   if (!ij_B)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   /* Check if A and B have the same row/col partitionings */
   row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A);
   row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B);
   col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A);
   col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B);
   for (i = 0; i < 2; i++)
   {
      if (row_partitioning_A[i] != row_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same row partitioning!");
         return hypre_error_flag;
      }
      if (col_partitioning_A[i] != col_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same col partitioning!");
         return hypre_error_flag;
      }
   }

   ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_IJMatrixComm(ij_C)         = hypre_IJMatrixComm(ij_A);
   hypre_IJMatrixObject(ij_C)       = NULL;
   hypre_IJMatrixTranslator(ij_C)   = NULL;
   hypre_IJMatrixAssumedPart(ij_C)  = NULL;
   hypre_IJMatrixObjectType(ij_C)   = hypre_IJMatrixObjectType(ij_A);
   hypre_IJMatrixAssembleFlag(ij_C) = 1;
   hypre_IJMatrixPrintLevel(ij_C)   = hypre_IJMatrixPrintLevel(ij_A);

   /* Copy row/col partitioning of A to C */
   for (i = 0; i < 2; i++)
   {
      hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i];
      hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i];
   }

   if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
   {
      hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C);
   }
   else
   {
      hypre_error_in_arg(1);
   }

   *matrix_C = (HYPRE_IJMatrix) ij_C;
   return hypre_error_flag;
}
|
configurator.c | /* Simple tool to create config.h.
* Would be much easier with ccan modules, but deliberately standalone.
*
* Copyright 2011 Rusty Russell <rusty@rustcorp.com.au>. MIT license.
*
* c12r_err, c12r_errx functions copied from ccan/err/err.c
* Copyright Rusty Russell <rusty@rustcorp.com.au>. CC0 (Public domain) License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define _POSIX_C_SOURCE 200809L /* For pclose, popen, strdup */
#define EXIT_BAD_USAGE 1
#define EXIT_TROUBLE_RUNNING 2
#define EXIT_BAD_TEST 3
#define EXIT_BAD_INPUT 4
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef _MSC_VER
#define popen _popen
#define pclose _pclose
#endif
#ifdef _MSC_VER
#define DEFAULT_COMPILER "cl"
/* Note: Dash options avoid POSIX path conversion when used under msys bash
* and are therefore preferred to slash (e.g. -nologo over /nologo)
* Note: Disable Warning 4200 "nonstandard extension used : zero-sized array
* in struct/union" for flexible array members.
*/
#define DEFAULT_FLAGS "-nologo -Zi -W4 -wd4200 " \
"-D_CRT_NONSTDC_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS"
#define DEFAULT_OUTPUT_EXE_FLAG "-Fe:"
#else
#define DEFAULT_COMPILER "cc"
#define DEFAULT_FLAGS "-g3 -ggdb -Wall -Wundef -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wold-style-definition"
#define DEFAULT_OUTPUT_EXE_FLAG "-o"
#endif
#define OUTPUT_FILE "configurator.out"
#define INPUT_FILE "configuratortest.c"
#ifdef _WIN32
#define DIR_SEP "\\"
#else
#define DIR_SEP "/"
#endif
static const char *progname = "";
static int verbose;
static bool like_a_libtool = false;
/* One configure-time probe: a code fragment plus flags describing how to
 * wrap, compile and (optionally) run it.  The results populate config.h. */
struct test {
const char *name; /* config.h symbol to define, e.g. HAVE_ALIGNOF */
const char *desc; /* human-readable description for progress output */
/*
* Template style flags (pick one):
* OUTSIDE_MAIN:
* - put a simple boilerplate main below it.
* DEFINES_FUNC:
* - defines a static function called func; adds ref to avoid warnings
* INSIDE_MAIN:
* - put this inside main().
* DEFINES_EVERYTHING:
* - don't add any boilerplate at all.
*
* Execution flags:
* EXECUTE:
* - a runtime test; must compile, exit 0 means flag is set.
* MAY_NOT_COMPILE:
* - Only useful with EXECUTE: don't get upset if it doesn't compile.
* <nothing>:
* - a compile test, if it compiles must run and exit 0.
*/
const char *style; /* '|'-separated combination of the flags above */
const char *depends; /* other test names this one requires (NULL if none) */
const char *link; /* extra linker arguments, if any */
const char *fragment; /* the C code under test */
const char *flags; /* extra compiler flags, if any */
const char *overrides; /* On success, force this to '1' */
bool done; /* set once the test has been evaluated */
bool answer; /* test outcome (valid only when done) */
};
/* Terminated by a NULL name */
static struct test *tests;
/*
 * Built-in probes.  Entries are positional: name, desc, style, depends,
 * link, fragment[, flags[, overrides]] -- see struct test.  The table is
 * copied into the heap-allocated 'tests' array (with a zeroed sentinel
 * slot) in main().
 */
static const struct test base_tests[] = {
    { "HAVE_32BIT_OFF_T", "off_t is 32 bits",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
      "#include <sys/types.h>\n"
      "int main(void) {\n"
      " return sizeof(off_t) == 4 ? 0 : 1;\n"
      "}\n" },
    { "HAVE_ALIGNOF", "__alignof__ support",
      "INSIDE_MAIN", NULL, NULL,
      "return __alignof__(double) > 0 ? 0 : 1;" },
    { "HAVE_ASPRINTF", "asprintf() declaration",
      "DEFINES_FUNC", NULL, NULL,
      "#ifndef _GNU_SOURCE\n"
      "#define _GNU_SOURCE\n"
      "#endif\n"
      "#include <stdio.h>\n"
      "static char *func(int x) {"
      " char *p;\n"
      " if (asprintf(&p, \"%u\", x) == -1) \n"
      " p = NULL;\n"
      " return p;\n"
      "}" },
    { "HAVE_ATTRIBUTE_COLD", "__attribute__((cold)) support",
      "DEFINES_FUNC", NULL, NULL,
      "static int __attribute__((cold)) func(int x) { return x; }" },
    { "HAVE_ATTRIBUTE_CONST", "__attribute__((const)) support",
      "DEFINES_FUNC", NULL, NULL,
      "static int __attribute__((const)) func(int x) { return x; }" },
    { "HAVE_ATTRIBUTE_DEPRECATED", "__attribute__((deprecated)) support",
      "DEFINES_FUNC", NULL, NULL,
      "static int __attribute__((deprecated)) func(int x) { return x; }" },
    { "HAVE_ATTRIBUTE_NONNULL", "__attribute__((nonnull)) support",
      "DEFINES_FUNC", NULL, NULL,
      "static char *__attribute__((nonnull)) func(char *p) { return p; }" },
    { "HAVE_ATTRIBUTE_SENTINEL", "__attribute__((sentinel)) support",
      "DEFINES_FUNC", NULL, NULL,
      "static int __attribute__((sentinel)) func(int i, ...) { return i; }" },
    { "HAVE_ATTRIBUTE_PURE", "__attribute__((pure)) support",
      "DEFINES_FUNC", NULL, NULL,
      "static int __attribute__((pure)) func(int x) { return x; }" },
    { "HAVE_ATTRIBUTE_MAY_ALIAS", "__attribute__((may_alias)) support",
      "OUTSIDE_MAIN", NULL, NULL,
      "typedef short __attribute__((__may_alias__)) short_a;" },
    { "HAVE_ATTRIBUTE_NORETURN", "__attribute__((noreturn)) support",
      "DEFINES_FUNC", NULL, NULL,
      "#include <stdlib.h>\n"
      "static void __attribute__((noreturn)) func(int x) { exit(x); }" },
    { "HAVE_ATTRIBUTE_PRINTF", "__attribute__ format printf support",
      "DEFINES_FUNC", NULL, NULL,
      "static void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) { (void)fmt; }" },
    { "HAVE_ATTRIBUTE_UNUSED", "__attribute__((unused)) support",
      "OUTSIDE_MAIN", NULL, NULL,
      "static int __attribute__((unused)) func(int x) { return x; }" },
    { "HAVE_ATTRIBUTE_USED", "__attribute__((used)) support",
      "OUTSIDE_MAIN", NULL, NULL,
      "static int __attribute__((used)) func(int x) { return x; }" },
    { "HAVE_BACKTRACE", "backtrace() in <execinfo.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#include <execinfo.h>\n"
      "static int func(int x) {"
      " void *bt[10];\n"
      " return backtrace(bt, 10) < x;\n"
      "}" },
    { "HAVE_BIG_ENDIAN", "big endian",
      "INSIDE_MAIN|EXECUTE", NULL, NULL,
      "union { int i; char c[sizeof(int)]; } u;\n"
      "u.i = 0x01020304;\n"
      "return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;" },
    { "HAVE_BSWAP_64", "bswap64 in byteswap.h",
      "DEFINES_FUNC", "HAVE_BYTESWAP_H", NULL,
      "#include <byteswap.h>\n"
      "static int func(int x) { return bswap_64(x); }" },
    { "HAVE_BUILTIN_CHOOSE_EXPR", "__builtin_choose_expr support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_choose_expr(1, 0, \"garbage\");" },
    { "HAVE_BUILTIN_CLZ", "__builtin_clz support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_CLZL", "__builtin_clzl support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_CLZLL", "__builtin_clzll support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_CTZ", "__builtin_ctz support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_ctz(1 << (sizeof(int)*8 - 1)) == (sizeof(int)*8 - 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_CTZL", "__builtin_ctzl support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_ctzl(1UL << (sizeof(long)*8 - 1)) == (sizeof(long)*8 - 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_CTZLL", "__builtin_ctzll support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_ctzll(1ULL << (sizeof(long long)*8 - 1)) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_CONSTANT_P", "__builtin_constant_p support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_constant_p(1) ? 0 : 1;" },
    { "HAVE_BUILTIN_EXPECT", "__builtin_expect support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_expect(argc == 1, 1) ? 0 : 1;" },
    { "HAVE_BUILTIN_FFS", "__builtin_ffs support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_ffs(0) == 0 ? 0 : 1;" },
    { "HAVE_BUILTIN_FFSL", "__builtin_ffsl support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_ffsl(0L) == 0 ? 0 : 1;" },
    { "HAVE_BUILTIN_FFSLL", "__builtin_ffsll support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_ffsll(0LL) == 0 ? 0 : 1;" },
    { "HAVE_BUILTIN_POPCOUNT", "__builtin_popcount support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_popcount(255) == 8 ? 0 : 1;" },
    { "HAVE_BUILTIN_POPCOUNTL", "__builtin_popcountl support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_popcountl(255L) == 8 ? 0 : 1;" },
    { "HAVE_BUILTIN_POPCOUNTLL", "__builtin_popcountll support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_popcountll(255LL) == 8 ? 0 : 1;" },
    { "HAVE_BUILTIN_TYPES_COMPATIBLE_P", "__builtin_types_compatible_p support",
      "INSIDE_MAIN", NULL, NULL,
      "return __builtin_types_compatible_p(char *, int) ? 1 : 0;" },
    { "HAVE_ICCARM_INTRINSICS", "<intrinsics.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#include <intrinsics.h>\n"
      "int func(int v) {\n"
      " return __CLZ(__RBIT(v));\n"
      "}" },
    { "HAVE_BYTESWAP_H", "<byteswap.h>",
      "OUTSIDE_MAIN", NULL, NULL,
      "#include <byteswap.h>\n" },
    { "HAVE_CLOCK_GETTIME", "clock_gettime() declaration",
      "DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC", NULL,
      "#include <time.h>\n"
      "static struct timespec func(void) {\n"
      " struct timespec ts;\n"
      " clock_gettime(CLOCK_REALTIME, &ts);\n"
      " return ts;\n"
      "}\n" },
    { "HAVE_CLOCK_GETTIME_IN_LIBRT", "clock_gettime() in librt",
      "DEFINES_FUNC",
      "HAVE_STRUCT_TIMESPEC !HAVE_CLOCK_GETTIME",
      "-lrt",
      "#include <time.h>\n"
      "static struct timespec func(void) {\n"
      " struct timespec ts;\n"
      " clock_gettime(CLOCK_REALTIME, &ts);\n"
      " return ts;\n"
      "}\n",
      /* FIX: the next positional initializer is 'flags'; without this
       * explicit NULL, "HAVE_CLOCK_GETTIME" was appended to the compile
       * command as a flag (breaking the test) instead of being the
       * intended 'overrides' field. */
      NULL,
      /* This means HAVE_CLOCK_GETTIME, too */
      "HAVE_CLOCK_GETTIME" },
    { "HAVE_COMPOUND_LITERALS", "compound literal support",
      "INSIDE_MAIN", NULL, NULL,
      "int *foo = (int[]) { 1, 2, 3, 4 };\n"
      "return foo[0] ? 0 : 1;" },
    { "HAVE_FCHDIR", "fchdir support",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
      "#include <sys/types.h>\n"
      "#include <sys/stat.h>\n"
      "#include <fcntl.h>\n"
      "#include <unistd.h>\n"
      "int main(void) {\n"
      " int fd = open(\"..\", O_RDONLY);\n"
      " return fchdir(fd) == 0 ? 0 : 1;\n"
      "}\n" },
    { "HAVE_ERR_H", "<err.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#include <err.h>\n"
      "static void func(int arg) {\n"
      " if (arg == 0)\n"
      " err(1, \"err %u\", arg);\n"
      " if (arg == 1)\n"
      " errx(1, \"err %u\", arg);\n"
      " if (arg == 3)\n"
      " warn(\"warn %u\", arg);\n"
      " if (arg == 4)\n"
      " warnx(\"warn %u\", arg);\n"
      "}\n" },
    { "HAVE_FILE_OFFSET_BITS", "_FILE_OFFSET_BITS to get 64-bit offsets",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
      "HAVE_32BIT_OFF_T", NULL,
      "#define _FILE_OFFSET_BITS 64\n"
      "#include <sys/types.h>\n"
      "int main(void) {\n"
      " return sizeof(off_t) == 8 ? 0 : 1;\n"
      "}\n" },
    { "HAVE_FOR_LOOP_DECLARATION", "for loop declaration support",
      "INSIDE_MAIN", NULL, NULL,
      "int ret = 1;\n"
      "for (int i = 0; i < argc; i++) { ret = 0; };\n"
      "return ret;" },
    { "HAVE_FLEXIBLE_ARRAY_MEMBER", "flexible array member support",
      "OUTSIDE_MAIN", NULL, NULL,
      "struct foo { unsigned int x; int arr[]; };" },
    { "HAVE_GETPAGESIZE", "getpagesize() in <unistd.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#include <unistd.h>\n"
      "static int func(void) { return getpagesize(); }" },
    { "HAVE_ISBLANK", "isblank() in <ctype.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#ifndef _GNU_SOURCE\n"
      "#define _GNU_SOURCE\n"
      "#endif\n"
      "#include <ctype.h>\n"
      "static int func(void) { return isblank(' '); }" },
    { "HAVE_LITTLE_ENDIAN", "little endian",
      "INSIDE_MAIN|EXECUTE", NULL, NULL,
      "union { int i; char c[sizeof(int)]; } u;\n"
      "u.i = 0x01020304;\n"
      "return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;" },
    { "HAVE_MEMMEM", "memmem in <string.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#ifndef _GNU_SOURCE\n"
      "#define _GNU_SOURCE\n"
      "#endif\n"
      "#include <string.h>\n"
      "static void *func(void *h, size_t hl, void *n, size_t nl) {\n"
      "return memmem(h, hl, n, nl);"
      "}\n", },
    { "HAVE_MEMRCHR", "memrchr in <string.h>",
      "DEFINES_FUNC", NULL, NULL,
      "#ifndef _GNU_SOURCE\n"
      "#define _GNU_SOURCE\n"
      "#endif\n"
      "#include <string.h>\n"
      "static void *func(void *s, int c, size_t n) {\n"
      "return memrchr(s, c, n);"
      "}\n", },
    { "HAVE_MMAP", "mmap() declaration",
      "DEFINES_FUNC", NULL, NULL,
      "#include <sys/mman.h>\n"
      "static void *func(int fd) {\n"
      " return mmap(0, 65536, PROT_READ, MAP_SHARED, fd, 0);\n"
      "}" },
    { "HAVE_PROC_SELF_MAPS", "/proc/self/maps exists",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
      "#include <sys/types.h>\n"
      "#include <sys/stat.h>\n"
      "#include <fcntl.h>\n"
      "int main(void) {\n"
      " return open(\"/proc/self/maps\", O_RDONLY) != -1 ? 0 : 1;\n"
      "}\n" },
    { "HAVE_QSORT_R_PRIVATE_LAST", "qsort_r cmp takes trailing arg",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
      "#ifndef _GNU_SOURCE\n"
      "#define _GNU_SOURCE\n"
      "#endif\n"
      "#include <stdlib.h>\n"
      "static int cmp(const void *lp, const void *rp, void *priv) {\n"
      " *(unsigned int *)priv = 1;\n"
      " return *(const int *)lp - *(const int *)rp; }\n"
      "int main(void) {\n"
      " int array[] = { 9, 2, 5 };\n"
      " unsigned int called = 0;\n"
      " qsort_r(array, 3, sizeof(int), cmp, &called);\n"
      " return called && array[0] == 2 && array[1] == 5 && array[2] == 9 ? 0 : 1;\n"
      "}\n" },
    { "HAVE_STRUCT_TIMESPEC", "struct timespec declaration",
      "DEFINES_FUNC", NULL, NULL,
      "#include <time.h>\n"
      "static void func(void) {\n"
      " struct timespec ts;\n"
      " ts.tv_sec = ts.tv_nsec = 1;\n"
      "}\n" },
    { "HAVE_SECTION_START_STOP", "__attribute__((section)) and __start/__stop",
      "DEFINES_FUNC", NULL, NULL,
      "static void *__attribute__((__section__(\"mysec\"))) p = &p;\n"
      "static int func(void) {\n"
      " extern void *__start_mysec[], *__stop_mysec[];\n"
      " return __stop_mysec - __start_mysec;\n"
      "}\n" },
    { "HAVE_STACK_GROWS_UPWARDS", "stack grows upwards",
      "DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
      "#include <stddef.h>\n"
      "static ptrdiff_t nest(const void *base, unsigned int i)\n"
      "{\n"
      " if (i == 0)\n"
      " return (const char *)&i - (const char *)base;\n"
      " return nest(base, i-1);\n"
      "}\n"
      "int main(int argc, char *argv[]) {\n"
      " (void)argv;\n"
      " return (nest(&argc, argc) > 0) ? 0 : 1;\n"
      "}\n" },
    { "HAVE_STATEMENT_EXPR", "statement expression support",
      "INSIDE_MAIN", NULL, NULL,
      "return ({ int x = argc; x == argc ? 0 : 1; });" },
    { "HAVE_SYS_FILIO_H", "<sys/filio.h>",
      "OUTSIDE_MAIN", NULL, NULL, /* Solaris needs this for FIONREAD */
      "#include <sys/filio.h>\n" },
    { "HAVE_SYS_TERMIOS_H", "<sys/termios.h>",
      "OUTSIDE_MAIN", NULL, NULL,
      "#include <sys/termios.h>\n" },
    { "HAVE_SYS_UNISTD_H", "<sys/unistd.h>",
      "OUTSIDE_MAIN", NULL, NULL,
      "#include <sys/unistd.h>\n" },
    { "HAVE_TYPEOF", "__typeof__ support",
      "INSIDE_MAIN", NULL, NULL,
      "__typeof__(argc) i; i = argc; return i == argc ? 0 : 1;" },
    { "HAVE_UNALIGNED_ACCESS", "unaligned access to int",
      "DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
      "#include <string.h>\n"
      "int main(int argc, char *argv[]) {\n"
      " (void)argc;\n"
      " char pad[sizeof(int *) * 1];\n"
      " strncpy(pad, argv[0], sizeof(pad));\n"
      " int *x = (int *)pad, *y = (int *)(pad + 1);\n"
      " return *x == *y;\n"
      "}\n" },
    { "HAVE_UTIME", "utime() declaration",
      "DEFINES_FUNC", NULL, NULL,
      "#include <sys/types.h>\n"
      "#include <utime.h>\n"
      "static int func(const char *filename) {\n"
      /* FIX: fragment contained the mojibake "\303\227" (an HTML-entity
       * corruption of "&times"); restored to take the address of 'times'. */
      " struct utimbuf times = { 0 };\n"
      " return utime(filename, &times);\n"
      "}" },
    { "HAVE_WARN_UNUSED_RESULT", "__attribute__((warn_unused_result))",
      "DEFINES_FUNC", NULL, NULL,
      "#include <sys/types.h>\n"
      "#include <utime.h>\n"
      "static __attribute__((warn_unused_result)) int func(int i) {\n"
      " return i + 1;\n"
      "}" },
    { "HAVE_OPENMP", "#pragma omp and -fopenmp support",
      "INSIDE_MAIN", NULL, NULL,
      "int i;\n"
      "#pragma omp parallel for\n"
      "for(i = 0; i < 0; i++) {};\n"
      "return 0;\n",
      "-Werror -fopenmp" },
    { "HAVE_VALGRIND_MEMCHECK_H", "<valgrind/memcheck.h>",
      "OUTSIDE_MAIN", NULL, NULL,
      "#include <valgrind/memcheck.h>\n" },
    /* FIX: description was missing the closing '>'. */
    { "HAVE_UCONTEXT", "working <ucontext.h>",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
      NULL, NULL,
      "#include <ucontext.h>\n"
      "static int x = 0;\n"
      "static char stack[2048];\n"
      "static ucontext_t a, b;\n"
      "static void fn(void) {\n"
      " x |= 2;\n"
      " setcontext(&b);\n"
      " x |= 4;\n"
      "}\n"
      "int main(void) {\n"
      " x |= 1;\n"
      " getcontext(&a);\n"
      " a.uc_stack.ss_sp = stack;\n"
      " a.uc_stack.ss_size = sizeof(stack);\n"
      " makecontext(&a, fn, 0);\n"
      " swapcontext(&b, &a);\n"
      " return (x == 3) ? 0 : 1;\n"
      "}\n"
    },
    { "HAVE_POINTER_SAFE_MAKECONTEXT", "passing pointers via makecontext()",
      "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE",
      "HAVE_UCONTEXT", NULL,
      "#include <stddef.h>\n"
      "#include <ucontext.h>\n"
      "static int worked = 0;\n"
      "static char stack[1024];\n"
      "static ucontext_t a, b;\n"
      "static void fn(void *p, void *q) {\n"
      " void *cp = &worked;\n"
      " void *cq = (void *)(~((ptrdiff_t)cp));\n"
      " if ((p == cp) && (q == cq))\n"
      " worked = 1;\n"
      " setcontext(&b);\n"
      "}\n"
      "int main(void) {\n"
      " void *ap = &worked;\n"
      " void *aq = (void *)(~((ptrdiff_t)ap));\n"
      " getcontext(&a);\n"
      " a.uc_stack.ss_sp = stack;\n"
      " a.uc_stack.ss_size = sizeof(stack);\n"
      " makecontext(&a, (void (*)(void))fn, 2, ap, aq);\n"
      " swapcontext(&b, &a);\n"
      " return worked ? 0 : 1;\n"
      "}\n"
    },
};
/* err(3)-alike: print "prog: <msg>: <strerror(errno)>" to stderr, exit(eval). */
static void c12r_err(int eval, const char *fmt, ...)
{
    /* Save errno first: the fprintf calls below may clobber it. */
    int saved_errno = errno;
    va_list args;

    fprintf(stderr, "%s: ", progname);
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
    fprintf(stderr, ": %s\n", strerror(saved_errno));
    exit(eval);
}
/* errx(3)-alike: print "prog: <msg>" to stderr (no errno), exit(eval). */
static void c12r_errx(int eval, const char *fmt, ...)
{
    va_list args;

    fprintf(stderr, "%s: ", progname);
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
    fputc('\n', stderr);
    exit(eval);
}
/* In --autotools-style mode, announce the check being started ("what why... "). */
static void start_test(const char *what, const char *why)
{
    if (!like_a_libtool)
        return;
    printf("%s%s... ", what, why);
    fflush(stdout);
}
/* In --autotools-style mode, finish the announcement with "yes" or "no". */
static void end_test(bool result)
{
    if (like_a_libtool)
        puts(result ? "yes" : "no");
}
/*
 * Copy fsrc to fdst in BUFSIZ chunks; stop on read EOF/error or on a
 * short write.  Returns the number of bytes actually written.
 */
static size_t fcopy(FILE *fsrc, FILE *fdst)
{
    char chunk[BUFSIZ];
    size_t written = 0;

    for (;;) {
        size_t got = fread(chunk, 1, sizeof(chunk), fsrc);
        if (got == 0)
            break;
        size_t put = fwrite(chunk, 1, got, fdst);
        written += put;
        if (put != got)
            break;
    }
    return written;
}
/*
 * Slurp the whole of 'file' into a NUL-terminated malloc'd buffer,
 * doubling the buffer as needed.  Exits via c12r_err() on read error
 * or allocation failure.
 *
 * Fix: malloc/realloc results were previously used unchecked, which
 * dereferenced NULL on out-of-memory.
 */
static char *grab_stream(FILE *file)
{
    size_t max, ret, size = 0;
    char *buffer;

    max = BUFSIZ;
    buffer = malloc(max);
    if (!buffer)
        c12r_err(EXIT_TROUBLE_RUNNING, "allocating command output");
    /* Keep reading while we fill the buffer exactly; a short read means EOF/error. */
    while ((ret = fread(buffer+size, 1, max - size, file)) == max - size) {
        size += ret;
        buffer = realloc(buffer, max *= 2);
        if (!buffer)
            c12r_err(EXIT_TROUBLE_RUNNING, "growing command output");
    }
    size += ret;
    if (ferror(file))
        c12r_err(EXIT_TROUBLE_RUNNING, "reading from command");
    buffer[size] = '\0';
    return buffer;
}
/*
 * Run cmd with stderr folded into stdout ("cmd 2>&1"); return its output
 * as a malloc'd string and store the pclose() status in *exitstatus.
 *
 * Fix: the malloc of the redirected command line was unchecked; memcpy
 * into a NULL buffer on out-of-memory was undefined behavior.
 */
static char *run(const char *cmd, int *exitstatus)
{
    static const char redir[] = " 2>&1";
    size_t cmdlen;
    char *cmdredir;
    FILE *cmdout;
    char *ret;

    cmdlen = strlen(cmd);
    cmdredir = malloc(cmdlen + sizeof(redir));
    if (!cmdredir)
        c12r_err(EXIT_TROUBLE_RUNNING, "allocating command line");
    memcpy(cmdredir, cmd, cmdlen);
    memcpy(cmdredir + cmdlen, redir, sizeof(redir)); /* includes the NUL */
    cmdout = popen(cmdredir, "r");
    if (!cmdout)
        c12r_err(EXIT_TROUBLE_RUNNING, "popen \"%s\"", cmdredir);
    free(cmdredir);
    ret = grab_stream(cmdout);
    *exitstatus = pclose(cmdout);
    return ret;
}
/*
 * Build "argv[1] argv[2] ... <outflag><files>" as one malloc'd string.
 * argv[0] is skipped (it is the program name slot).  A space is emitted
 * after the last argument only when outflag is non-empty, so outflag
 * abuts the final argument separator exactly as the compiler expects.
 */
static char *connect_args(const char *argv[], const char *outflag,
                          const char *files)
{
    unsigned int n;
    char *cmd;
    size_t total = strlen(outflag) + strlen(files) + 1;
    size_t pos;

    for (n = 1; argv[n]; n++)
        total += 1 + strlen(argv[n]);
    cmd = malloc(total);
    pos = 0;
    for (n = 1; argv[n]; n++) {
        size_t arglen = strlen(argv[n]);
        memcpy(cmd + pos, argv[n], arglen);
        pos += arglen;
        if (argv[n+1] || *outflag)
            cmd[pos++] = ' ';
    }
    strcpy(cmd + pos, outflag);
    strcpy(cmd + pos + strlen(outflag), files);
    return cmd;
}
static struct test *find_test(const char *name)
{
unsigned int i;
for (i = 0; tests[i].name; i++) {
if (strcmp(tests[i].name, name) == 0)
return &tests[i];
}
c12r_errx(EXIT_BAD_TEST, "Unknown test %s", name);
abort();
}
/* Boilerplate pieces stitched around a test fragment to form a complete
 * translation unit, selected by the test's style flags in run_test(). */
#define PRE_BOILERPLATE "/* Test program generated by configurator. */\n"
#define MAIN_START_BOILERPLATE \
    "int main(int argc, char *argv[]) {\n" \
    " (void)argc;\n" \
    " (void)argv;\n"
#define USE_FUNC_BOILERPLATE "(void)func;\n"
#define MAIN_BODY_BOILERPLATE "return 0;\n"
#define MAIN_END_BOILERPLATE "}\n"
/*
 * Compile (and, for EXECUTE/INSIDE_MAIN styles, run) test's fragment and
 * memoize the verdict in test->done / test->answer.  Dependencies are
 * resolved recursively first; any unsatisfied dependency yields false
 * without compiling.  Exits via c12r_errx() on malformed tests, or when
 * an EXECUTE test without MAY_NOT_COMPILE fails to compile.
 *
 * Fix: for a negated dependency ("!NAME") that is not the last one, the
 * strdup'd string was freed through a pointer advanced past the '!'
 * (invalid free); it was also leaked on the early-return path.  The
 * original allocation is now tracked separately in dep_dup.
 */
static bool run_test(const char *cmd, struct test *test)
{
    char *output, *newcmd;
    FILE *outf;
    int status;

    if (test->done)
        return test->answer;

    if (test->depends) {
        size_t len;
        const char *deps = test->depends;
        char *dep;

        /* Space-separated dependencies, could be ! for inverse. */
        while ((len = strcspn(deps, " ")) != 0) {
            bool positive = true;
            char *dep_dup = NULL;

            if (deps[len]) {
                /* More deps follow: cut out this token. */
                dep_dup = strdup(deps);
                dep_dup[len] = '\0';
                dep = dep_dup;
            } else {
                dep = (char *)deps;
            }
            if (dep[0] == '!') {
                dep++;
                positive = false;
            }
            if (run_test(cmd, find_test(dep)) != positive) {
                free(dep_dup);
                test->answer = false;
                test->done = true;
                return test->answer;
            }
            free(dep_dup);
            deps += len;
            deps += strspn(deps, " ");
        }
    }

    outf = fopen(INPUT_FILE, verbose > 1 ? "w+" : "w");
    if (!outf)
        c12r_err(EXIT_TROUBLE_RUNNING, "creating %s", INPUT_FILE);

    /* Wrap the fragment in the boilerplate its style calls for. */
    fprintf(outf, "%s", PRE_BOILERPLATE);
    if (strstr(test->style, "INSIDE_MAIN")) {
        fprintf(outf, "%s", MAIN_START_BOILERPLATE);
        fprintf(outf, "%s", test->fragment);
        fprintf(outf, "%s", MAIN_END_BOILERPLATE);
    } else if (strstr(test->style, "OUTSIDE_MAIN")) {
        fprintf(outf, "%s", test->fragment);
        fprintf(outf, "%s", MAIN_START_BOILERPLATE);
        fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
        fprintf(outf, "%s", MAIN_END_BOILERPLATE);
    } else if (strstr(test->style, "DEFINES_FUNC")) {
        fprintf(outf, "%s", test->fragment);
        fprintf(outf, "%s", MAIN_START_BOILERPLATE);
        fprintf(outf, "%s", USE_FUNC_BOILERPLATE);
        fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
        fprintf(outf, "%s", MAIN_END_BOILERPLATE);
    } else if (strstr(test->style, "DEFINES_EVERYTHING")) {
        fprintf(outf, "%s", test->fragment);
    } else
        c12r_errx(EXIT_BAD_TEST, "Unknown style for test %s: %s",
                  test->name, test->style);

    if (verbose > 1) {
        fseek(outf, 0, SEEK_SET);
        fcopy(outf, stdout);
    }
    fclose(outf);

    newcmd = strdup(cmd);
    /* Per-test extra compile flags. */
    if (test->flags) {
        newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ")
                 + strlen(test->flags) + 1);
        strcat(newcmd, " ");
        strcat(newcmd, test->flags);
        if (verbose > 1)
            printf("Extra flags line: %s", newcmd);
    }
    /* Per-test extra link arguments (e.g. -lrt). */
    if (test->link) {
        newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ")
                 + strlen(test->link) + 1);
        strcat(newcmd, " ");
        strcat(newcmd, test->link);
        if (verbose > 1)
            printf("Extra link line: %s", newcmd);
    }

    start_test("checking for ", test->desc);
    output = run(newcmd, &status);
    free(newcmd);
    /* Any compiler warning in the output counts as failure too. */
    if (status != 0 || strstr(output, "warning")) {
        if (verbose)
            printf("Compile %s for %s, status %i: %s\n",
                   status ? "fail" : "warning",
                   test->name, status, output);
        if (strstr(test->style, "EXECUTE")
            && !strstr(test->style, "MAY_NOT_COMPILE"))
            c12r_errx(EXIT_BAD_TEST,
                      "Test for %s did not compile:\n%s",
                      test->name, output);
        test->answer = false;
        free(output);
    } else {
        /* Compile succeeded. */
        free(output);
        /* We run INSIDE_MAIN tests for sanity checking. */
        if (strstr(test->style, "EXECUTE")
            || strstr(test->style, "INSIDE_MAIN")) {
            output = run("." DIR_SEP OUTPUT_FILE, &status);
            if (!strstr(test->style, "EXECUTE") && status != 0)
                c12r_errx(EXIT_BAD_TEST,
                          "Test for %s failed with %i:\n%s",
                          test->name, status, output);
            if (verbose && status)
                printf("%s exited %i\n", test->name, status);
            free(output);
        }
        test->answer = (status == 0);
    }
    test->done = true;
    end_test(test->answer);

    /* A passing test may force another test's answer to true. */
    if (test->answer && test->overrides) {
        struct test *override = find_test(test->overrides);
        override->done = true;
        override->answer = true;
    }
    return test->answer;
}
/*
 * Read the next "name=value" line from stdin, skipping blank lines,
 * leading whitespace, and '#' comments.  Stores a strdup'd name in
 * *fieldname and returns a strdup'd value (trailing newline stripped),
 * or NULL at EOF.  Exits on a line without '='.
 */
static char *any_field(char **fieldname)
{
    char line[1000];

    while (fgets(line, sizeof(line), stdin)) {
        char *start = line, *eq, *nl;

        /* Skip leading blanks; ignore comments and empty lines. */
        while (*start == ' ' || *start == '\t')
            start++;
        if (*start == '#' || *start == '\n')
            continue;

        eq = strchr(start, '=');
        if (!eq)
            c12r_errx(EXIT_BAD_INPUT, "no = in line: %s", start);
        *eq = '\0';
        *fieldname = strdup(start);

        /* fgets puts at most one '\n', at the end of the buffer. */
        nl = strchr(eq + 1, '\n');
        if (nl)
            *nl = '\0';
        return strdup(eq + 1);
    }
    return NULL;
}
/*
 * Read the next field from stdin and insist it is called 'name'.
 * Returns the strdup'd value; NULL at EOF when !compulsory, otherwise
 * exits with an error.
 *
 * Fix: the strdup'd fieldname returned by any_field() was leaked on the
 * success path; it is now freed once checked.
 */
static char *read_field(const char *name, bool compulsory)
{
    char *fieldname, *value;

    value = any_field(&fieldname);
    if (!value) {
        if (!compulsory)
            return NULL;
        c12r_errx(EXIT_BAD_INPUT, "Could not read field %s", name);
    }
    if (strcmp(fieldname, name) != 0)
        c12r_errx(EXIT_BAD_INPUT,
                  "Expected field %s not %s", name, fieldname);
    free(fieldname);
    return value;
}
/* Test descriptions from stdin:
* Lines starting with # or whitespace-only are ignored.
*
* First three non-ignored lines must be:
* var=<varname>
* desc=<description-for-autotools-style>
* style=OUTSIDE_MAIN DEFINES_FUNC INSIDE_MAIN DEFINES_EVERYTHING EXECUTE MAY_NOT_COMPILE
*
* Followed by optional lines:
* depends=<space-separated-testnames, ! to invert>
* link=<extra args for link line>
* flags=<extra args for compile line>
* overrides=<testname-to-force>
*
* Finally a code line, either:
* code=<oneline> OR
* code=
* <lines of code>
* <end-comment>
*
* And <end-comment> looks like this next comment: */
/*END*/
/*
 * Parse one test description from stdin into *test (see the format
 * comment above).  Returns false at EOF.  Multi-line code blocks are
 * accumulated until a line starting with the end comment.
 *
 * Fixes: the strdup'd field name from any_field() was leaked on every
 * iteration (now freed); the dead store "n += strlen(buf);" at the end
 * of the accumulation loop (n is re-derived each iteration) is removed.
 */
static bool read_test(struct test *test)
{
    char *field, *value;
    char buf[1000];

    memset(test, 0, sizeof(*test));
    test->name = read_field("var", false);
    if (!test->name)
        return false;
    test->desc = read_field("desc", true);
    test->style = read_field("style", true);
    /* Read any optional fields. */
    while ((value = any_field(&field)) != NULL) {
        if (strcmp(field, "depends") == 0)
            test->depends = value;
        else if (strcmp(field, "link") == 0)
            test->link = value;
        else if (strcmp(field, "flags") == 0)
            test->flags = value;
        else if (strcmp(field, "overrides") == 0)
            test->overrides = value;
        else if (strcmp(field, "code") == 0) {
            free(field);
            break;
        } else
            c12r_errx(EXIT_BAD_INPUT, "Unknown field %s in %s",
                      field, test->name);
        free(field);
    }
    if (!value)
        c12r_errx(EXIT_BAD_INPUT, "Missing code in %s", test->name);
    if (strlen(value) == 0) {
        /* Multiline program, read to END comment */
        while (fgets(buf, sizeof(buf), stdin) != 0) {
            size_t n;
            if (strncmp(buf, "/*END*/", 7) == 0)
                break;
            n = strlen(value);
            value = realloc(value, n + strlen(buf) + 1);
            strcpy(value + n, buf);
        }
    }
    test->fragment = value;
    return true;
}
static void read_tests(size_t num_tests)
{
while (read_test(tests + num_tests)) {
num_tests++;
tests = realloc(tests, num_tests * sizeof(tests[0]));
}
}
/*
 * Drive a configuration run: parse options, build the compiler command
 * line, run every registered test, then emit a variables file and/or the
 * generated config header (to stdout unless --header-file is given).
 */
int main(int argc, const char *argv[])
{
    char *cmd;
    unsigned int i;
    const char *default_args[]
        = { "", DEFAULT_COMPILER, DEFAULT_FLAGS, NULL };
    const char *outflag = DEFAULT_OUTPUT_EXE_FLAG;
    const char *configurator_cc = NULL;
    const char *orig_cc;
    const char *varfile = NULL;
    const char *headerfile = NULL;
    bool extra_tests = false;
    FILE *outf;
    if (argc > 0)
        progname = argv[0];
    /* Consume leading options by shifting argv; whatever remains is the
     * compiler command to probe with. */
    while (argc > 1) {
        if (strcmp(argv[1], "--help") == 0) {
            printf("Usage: configurator [-v] [--var-file=<filename>] [-O<outflag>] [--configurator-cc=<compiler-for-tests>] [--autotools-style] [--extra-tests] [<compiler> <flags>...]\n"
                   " <compiler> <flags> will have \"<outflag> <outfile> <infile.c>\" appended\n"
                   "Default: %s %s %s\n",
                   DEFAULT_COMPILER, DEFAULT_FLAGS,
                   DEFAULT_OUTPUT_EXE_FLAG);
            exit(0);
        }
        if (strncmp(argv[1], "-O", 2) == 0) {
            argc--;
            argv++;
            /* NOTE(review): after the shift above, argv[1] is the argument
             * *after* the -O option; the value attached to "-Oxxx" itself
             * would be argv[0] + 2.  This looks off by one -- verify
             * against intended usage before changing. */
            outflag = argv[1] + 2;
            if (!*outflag) {
                fprintf(stderr,
                    "%s: option requires an argument -- O\n",
                    argv[0]);
                exit(EXIT_BAD_USAGE);
            }
        } else if (strcmp(argv[1], "-v") == 0) {
            argc--;
            argv++;
            verbose++;
        } else if (strcmp(argv[1], "-vv") == 0) {
            argc--;
            argv++;
            verbose += 2;
        } else if (strncmp(argv[1], "--configurator-cc=", 18) == 0) {
            configurator_cc = argv[1] + 18;
            argc--;
            argv++;
        } else if (strncmp(argv[1], "--var-file=", 11) == 0) {
            varfile = argv[1] + 11;
            argc--;
            argv++;
        } else if (strcmp(argv[1], "--autotools-style") == 0) {
            like_a_libtool = true;
            argc--;
            argv++;
        } else if (strncmp(argv[1], "--header-file=", 14) == 0) {
            headerfile = argv[1] + 14;
            argc--;
            argv++;
        } else if (strcmp(argv[1], "--extra-tests") == 0) {
            extra_tests = true;
            argc--;
            argv++;
        } else if (strcmp(argv[1], "--") == 0) {
            /* NOTE(review): "--" is not consumed here, so it remains
             * argv[1] and is treated as the compiler name below -- confirm
             * whether an extra shift was intended. */
            break;
        } else if (argv[1][0] == '-') {
            c12r_errx(EXIT_BAD_USAGE, "Unknown option %s", argv[1]);
        } else {
            break;
        }
    }
    /* No compiler given: probe with the built-in default command. */
    if (argc == 1)
        argv = default_args;
    /* Copy with NULL entry at end */
    tests = calloc(sizeof(base_tests)/sizeof(base_tests[0]) + 1,
               sizeof(base_tests[0]));
    memcpy(tests, base_tests, sizeof(base_tests));
    if (extra_tests)
        read_tests(sizeof(base_tests)/sizeof(base_tests[0]));
    /* Remember the real compiler for CCAN_COMPILER even when the probes
     * run under --configurator-cc. */
    orig_cc = argv[1];
    if (configurator_cc)
        argv[1] = configurator_cc;
    cmd = connect_args(argv, outflag, OUTPUT_FILE " " INPUT_FILE);
    if (like_a_libtool) {
        start_test("Making autoconf users comfortable", "");
        sleep(1);
        end_test(1);
    }
    /* Run every test (dependencies are resolved recursively). */
    for (i = 0; tests[i].name; i++)
        run_test(cmd, &tests[i]);
    free(cmd);
    remove(OUTPUT_FILE);
    remove(INPUT_FILE);
    /* Optional "NAME=0/1" variables file ("-" means stdout, appended). */
    if (varfile) {
        FILE *vars;
        if (strcmp(varfile, "-") == 0)
            vars = stdout;
        else {
            start_test("Writing variables to ", varfile);
            vars = fopen(varfile, "a");
            if (!vars)
                c12r_err(EXIT_TROUBLE_RUNNING,
                     "Could not open %s", varfile);
        }
        for (i = 0; tests[i].name; i++)
            fprintf(vars, "%s=%u\n", tests[i].name, tests[i].answer);
        if (vars != stdout) {
            if (fclose(vars) != 0)
                c12r_err(EXIT_TROUBLE_RUNNING,
                     "Closing %s", varfile);
            end_test(1);
        }
    }
    /* Emit the config header (to a file, or stdout by default). */
    if (headerfile) {
        start_test("Writing header to ", headerfile);
        outf = fopen(headerfile, "w");
        if (!outf)
            c12r_err(EXIT_TROUBLE_RUNNING,
                 "Could not open %s", headerfile);
    } else
        outf = stdout;
    fprintf(outf, "/* Generated by CCAN configurator */\n"
           "#ifndef CCAN_CONFIG_H\n"
           "#define CCAN_CONFIG_H\n");
    fprintf(outf, "#ifndef _GNU_SOURCE\n");
    fprintf(outf, "#define _GNU_SOURCE /* Always use GNU extensions. */\n");
    fprintf(outf, "#endif\n");
    fprintf(outf, "#define CCAN_COMPILER \"%s\"\n", orig_cc);
    cmd = connect_args(argv + 1, "", "");
    fprintf(outf, "#define CCAN_CFLAGS \"%s\"\n", cmd);
    free(cmd);
    fprintf(outf, "#define CCAN_OUTPUT_EXE_CFLAG \"%s\"\n\n", outflag);
    /* This one implies "#include <ccan/..." works, eg. for tdb2.h */
    fprintf(outf, "#define HAVE_CCAN 1\n");
    for (i = 0; tests[i].name; i++)
        fprintf(outf, "#define %s %u\n", tests[i].name, tests[i].answer);
    fprintf(outf, "#endif /* CCAN_CONFIG_H */\n");
    if (headerfile) {
        if (fclose(outf) != 0)
            c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", headerfile);
        end_test(1);
    }
    return 0;
}
|
UMESimdVecUintPrototype.h | // The MIT License (MIT)
//
// Copyright (c) 2015-2017 CERN
//
// Author: Przemyslaw Karpinski
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
//
// This piece of code was developed as part of ICE-DIP project at CERN.
// "ICE-DIP is a European Industrial Doctorate project funded by the European Community's
// 7th Framework programme Marie Curie Actions under grant PITN-GA-2012-316596".
//
#ifndef UME_SIMD_VEC_UINT_PROTOTYPE_H_
#define UME_SIMD_VEC_UINT_PROTOTYPE_H_
#include <type_traits>
#include "../../../UMESimdInterface.h"
#include "../UMESimdMask.h"
#include "../UMESimdSwizzle.h"
namespace UME {
namespace SIMD {
// ********************************************************************************************
// UNSIGNED INTEGER VECTORS
// ********************************************************************************************
// Primary template: intentionally empty so that only the explicit
// specializations below (one per legal element-type/length pair) provide
// the nested typedefs; any other instantiation fails at compile time.
template<typename VEC_TYPE, uint32_t VEC_LEN>
struct SIMDVec_u_traits {
    // Generic trait class not containing type definition so that only correct explicit
    // type definitions are compiled correctly
};
// 8b vectors
// 8b total: 1 x uint8_t.
template<>
struct SIMDVec_u_traits<uint8_t, 1> {
    using HALF_LEN_VEC_TYPE            = NullType<1>;    // no half-length form of a 1-element vector
    using SCALAR_INT_TYPE              = int8_t;
    using SCALAR_FLOAT_TYPE            = NullType<2>;    // no 8-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<1>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<1>;
    using SCALAR_UINT_LOWER_PRECISION  = NullType<3>;    // nothing narrower than uint8_t
    using SCALAR_UINT_HIGHER_PRECISION = uint16_t;
};
// 16b vectors
// 16b total: 2 x uint8_t.
template<>
struct SIMDVec_u_traits<uint8_t, 2> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint8_t, 1>;
    using SCALAR_INT_TYPE              = int8_t;
    using SCALAR_FLOAT_TYPE            = NullType<1>;    // no 8-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<2>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<2>;
    using SCALAR_UINT_LOWER_PRECISION  = NullType<2>;    // nothing narrower than uint8_t
    using SCALAR_UINT_HIGHER_PRECISION = uint16_t;
};
// 16b total: 1 x uint16_t.
template<>
struct SIMDVec_u_traits<uint16_t, 1> {
    using HALF_LEN_VEC_TYPE            = NullType<1>;    // no half-length form of a 1-element vector
    using SCALAR_INT_TYPE              = int16_t;
    using SCALAR_FLOAT_TYPE            = NullType<2>;    // no 16-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<1>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<1>;
    using SCALAR_UINT_LOWER_PRECISION  = uint8_t;
    using SCALAR_UINT_HIGHER_PRECISION = uint32_t;
};
// 32b vectors
// 32b total: 4 x uint8_t.
template<>
struct SIMDVec_u_traits<uint8_t, 4> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint8_t, 2>;
    using SCALAR_INT_TYPE              = int8_t;
    using SCALAR_FLOAT_TYPE            = NullType<1>;    // no 8-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<4>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<4>;
    using SCALAR_UINT_LOWER_PRECISION  = NullType<2>;    // nothing narrower than uint8_t
    using SCALAR_UINT_HIGHER_PRECISION = uint16_t;
};
// 32b total: 2 x uint16_t.
template<>
struct SIMDVec_u_traits<uint16_t, 2> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint16_t, 1>;
    using SCALAR_INT_TYPE              = int16_t;
    using SCALAR_FLOAT_TYPE            = NullType<1>;    // no 16-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<2>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<2>;
    using SCALAR_UINT_LOWER_PRECISION  = uint8_t;
    using SCALAR_UINT_HIGHER_PRECISION = uint32_t;
};
// 32b total: 1 x uint32_t.
template<>
struct SIMDVec_u_traits<uint32_t, 1> {
    using HALF_LEN_VEC_TYPE            = NullType<1>;    // no half-length form of a 1-element vector
    using SCALAR_INT_TYPE              = int32_t;
    using SCALAR_FLOAT_TYPE            = float;
    using MASK_TYPE                    = SIMDVecMask<1>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<1>;
    using SCALAR_UINT_LOWER_PRECISION  = uint16_t;
    using SCALAR_UINT_HIGHER_PRECISION = uint64_t;
};
// 64b vectors
// 64b total: 8 x uint8_t.
template<>
struct SIMDVec_u_traits<uint8_t, 8> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint8_t, 4>;
    using SCALAR_INT_TYPE              = int8_t;
    using SCALAR_FLOAT_TYPE            = NullType<1>;    // no 8-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<8>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<8>;
    using SCALAR_UINT_LOWER_PRECISION  = NullType<2>;    // nothing narrower than uint8_t
    using SCALAR_UINT_HIGHER_PRECISION = uint16_t;
};
// 64b total: 4 x uint16_t.
template<>
struct SIMDVec_u_traits<uint16_t, 4> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint16_t, 2>;
    using SCALAR_INT_TYPE              = int16_t;
    using SCALAR_FLOAT_TYPE            = NullType<1>;    // no 16-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<4>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<4>;
    using SCALAR_UINT_LOWER_PRECISION  = uint8_t;
    using SCALAR_UINT_HIGHER_PRECISION = uint32_t;
};
// 64b total: 2 x uint32_t.
template<>
struct SIMDVec_u_traits<uint32_t, 2> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint32_t, 1>;
    using SCALAR_INT_TYPE              = int32_t;
    using SCALAR_FLOAT_TYPE            = float;
    using MASK_TYPE                    = SIMDVecMask<2>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<2>;
    using SCALAR_UINT_LOWER_PRECISION  = uint16_t;
    using SCALAR_UINT_HIGHER_PRECISION = uint64_t;
};
// 64b total: 1 x uint64_t.
template<>
struct SIMDVec_u_traits<uint64_t, 1> {
    using HALF_LEN_VEC_TYPE            = NullType<1>;    // no half-length form of a 1-element vector
    using SCALAR_INT_TYPE              = int64_t;
    using SCALAR_FLOAT_TYPE            = double;
    using MASK_TYPE                    = SIMDVecMask<1>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<1>;
    using SCALAR_UINT_LOWER_PRECISION  = uint32_t;
    using SCALAR_UINT_HIGHER_PRECISION = NullType<2>;    // nothing wider than uint64_t
};
// 128b vectors
// 128b total: 16 x uint8_t.
template<>
struct SIMDVec_u_traits<uint8_t, 16> {
    using HALF_LEN_VEC_TYPE            = SIMDVec_u<uint8_t, 8>;
    using SCALAR_INT_TYPE              = int8_t;
    using SCALAR_FLOAT_TYPE            = NullType<1>;    // no 8-bit float scalar
    using MASK_TYPE                    = SIMDVecMask<16>;
    using SWIZZLE_MASK_TYPE            = SIMDSwizzle<16>;
    using SCALAR_UINT_LOWER_PRECISION  = NullType<2>;    // nothing narrower than uint8_t
    using SCALAR_UINT_HIGHER_PRECISION = uint16_t;
};
template<>
struct SIMDVec_u_traits<uint16_t, 8> {
typedef SIMDVec_u<uint16_t, 4> HALF_LEN_VEC_TYPE;
typedef int16_t SCALAR_INT_TYPE;
typedef NullType<1> SCALAR_FLOAT_TYPE;
typedef SIMDVecMask<8> MASK_TYPE;
typedef SIMDSwizzle<8> SWIZZLE_MASK_TYPE;
typedef uint8_t SCALAR_UINT_LOWER_PRECISION;
typedef uint32_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint32_t, 4> {
typedef SIMDVec_u<uint32_t, 2> HALF_LEN_VEC_TYPE;
typedef int32_t SCALAR_INT_TYPE;
typedef float SCALAR_FLOAT_TYPE;
typedef SIMDVecMask<4> MASK_TYPE;
typedef SIMDSwizzle<4> SWIZZLE_MASK_TYPE;
typedef uint16_t SCALAR_UINT_LOWER_PRECISION;
typedef uint64_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint64_t, 2> {
typedef SIMDVec_u<uint64_t, 1> HALF_LEN_VEC_TYPE;
typedef int64_t SCALAR_INT_TYPE;
typedef double SCALAR_FLOAT_TYPE;
typedef SIMDVecMask<2> MASK_TYPE;
typedef SIMDSwizzle<2> SWIZZLE_MASK_TYPE;
typedef uint32_t SCALAR_UINT_LOWER_PRECISION;
typedef NullType<1> SCALAR_UINT_HIGHER_PRECISION;
};
// 256b vectors
// Trait tables for the 256-bit unsigned vector configurations
// (NullType<N> marks a companion type that does not exist).
template<>
struct SIMDVec_u_traits<uint8_t, 32> {
    typedef SIMDVec_u<uint8_t, 16> HALF_LEN_VEC_TYPE;
    typedef int8_t SCALAR_INT_TYPE;
    typedef NullType<1> SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<32> MASK_TYPE;
    typedef SIMDSwizzle<32> SWIZZLE_MASK_TYPE;
    typedef NullType<2> SCALAR_UINT_LOWER_PRECISION;
    typedef uint16_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint16_t, 16> {
    typedef SIMDVec_u<uint16_t, 8> HALF_LEN_VEC_TYPE;
    typedef int16_t SCALAR_INT_TYPE;
    typedef NullType<1> SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<16> MASK_TYPE;
    typedef SIMDSwizzle<16> SWIZZLE_MASK_TYPE;
    typedef uint8_t SCALAR_UINT_LOWER_PRECISION;
    typedef uint32_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint32_t, 8> {
    typedef SIMDVec_u<uint32_t, 4> HALF_LEN_VEC_TYPE;
    typedef int32_t SCALAR_INT_TYPE;
    typedef float SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<8> MASK_TYPE;
    typedef SIMDSwizzle<8> SWIZZLE_MASK_TYPE;
    typedef uint16_t SCALAR_UINT_LOWER_PRECISION;
    typedef uint64_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint64_t, 4> {
    typedef SIMDVec_u<uint64_t, 2> HALF_LEN_VEC_TYPE;
    typedef int64_t SCALAR_INT_TYPE;
    typedef double SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<4> MASK_TYPE;
    typedef SIMDSwizzle<4> SWIZZLE_MASK_TYPE;
    typedef uint32_t SCALAR_UINT_LOWER_PRECISION;
    typedef NullType<2> SCALAR_UINT_HIGHER_PRECISION;
};
// 512b vectors
// Trait tables for the 512-bit unsigned vector configurations
// (NullType<N> marks a companion type that does not exist).
template<>
struct SIMDVec_u_traits<uint8_t, 64> {
    typedef SIMDVec_u<uint8_t, 32> HALF_LEN_VEC_TYPE;
    typedef int8_t SCALAR_INT_TYPE;
    typedef NullType<1> SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<64> MASK_TYPE;
    typedef SIMDSwizzle<64> SWIZZLE_MASK_TYPE;
    typedef NullType<2> SCALAR_UINT_LOWER_PRECISION;
    typedef uint16_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint16_t, 32> {
    typedef SIMDVec_u<uint16_t, 16> HALF_LEN_VEC_TYPE;
    typedef int16_t SCALAR_INT_TYPE;
    typedef NullType<1> SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<32> MASK_TYPE;
    typedef SIMDSwizzle<32> SWIZZLE_MASK_TYPE;
    typedef uint8_t SCALAR_UINT_LOWER_PRECISION;
    typedef uint32_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint32_t, 16> {
    typedef SIMDVec_u<uint32_t, 8> HALF_LEN_VEC_TYPE;
    typedef int32_t SCALAR_INT_TYPE;
    typedef float SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<16> MASK_TYPE;
    typedef SIMDSwizzle<16> SWIZZLE_MASK_TYPE;
    typedef uint16_t SCALAR_UINT_LOWER_PRECISION;
    typedef uint64_t SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint64_t, 8> {
    typedef SIMDVec_u<uint64_t, 4> HALF_LEN_VEC_TYPE;
    typedef int64_t SCALAR_INT_TYPE;
    typedef double SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<8> MASK_TYPE;
    typedef SIMDSwizzle<8> SWIZZLE_MASK_TYPE;
    typedef uint32_t SCALAR_UINT_LOWER_PRECISION;
    typedef NullType<1> SCALAR_UINT_HIGHER_PRECISION;
};
// 1024b vectors
// Trait tables for the 1024-bit unsigned vector configurations. These are
// the widest supported vectors, so no higher-precision companion vector of
// the same bit width exists (SCALAR_UINT_HIGHER_PRECISION is NullType even
// where a wider scalar type exists).
template<>
struct SIMDVec_u_traits<uint8_t, 128> {
    typedef SIMDVec_u<uint8_t, 64> HALF_LEN_VEC_TYPE;
    typedef int8_t SCALAR_INT_TYPE;
    typedef NullType<1> SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<128> MASK_TYPE;
    typedef SIMDSwizzle<128> SWIZZLE_MASK_TYPE;
    typedef NullType<2> SCALAR_UINT_LOWER_PRECISION;
    typedef NullType<3> SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint16_t, 64> {
    typedef SIMDVec_u<uint16_t, 32> HALF_LEN_VEC_TYPE;
    typedef int16_t SCALAR_INT_TYPE;
    typedef NullType<1> SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<64> MASK_TYPE;
    typedef SIMDSwizzle<64> SWIZZLE_MASK_TYPE;
    typedef uint8_t SCALAR_UINT_LOWER_PRECISION;
    typedef NullType<1> SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint32_t, 32> {
    typedef SIMDVec_u<uint32_t, 16> HALF_LEN_VEC_TYPE;
    typedef int32_t SCALAR_INT_TYPE;
    typedef float SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<32> MASK_TYPE;
    typedef SIMDSwizzle<32> SWIZZLE_MASK_TYPE;
    typedef uint16_t SCALAR_UINT_LOWER_PRECISION;
    typedef NullType<1> SCALAR_UINT_HIGHER_PRECISION;
};
template<>
struct SIMDVec_u_traits<uint64_t, 16> {
    typedef SIMDVec_u<uint64_t, 8> HALF_LEN_VEC_TYPE;
    typedef int64_t SCALAR_INT_TYPE;
    typedef double SCALAR_FLOAT_TYPE;
    typedef SIMDVecMask<16> MASK_TYPE;
    typedef SIMDSwizzle<16> SWIZZLE_MASK_TYPE;
    typedef uint32_t SCALAR_UINT_LOWER_PRECISION;
    typedef NullType<1> SCALAR_UINT_HIGHER_PRECISION;
};
// ***************************************************************************
// *
// * Implementation of unsigned integer SIMDx_8u, SIMDx_16u, SIMDx_32u,
// * and SIMDx_64u.
// *
// * This implementation uses scalar emulation available through to
// * SIMDVecUnsignedInterface.
// *
// ***************************************************************************
// SIMDVec_u<SCALAR, LEN>: unsigned-integer SIMD vector emulated with a plain
// scalar array. The base interfaces are parameterized on the derived type
// (CRTP), so any operation not overridden here falls back to the interface's
// generic implementation built from extract/insert.
template<typename SCALAR_UINT_TYPE, uint32_t VEC_LEN>
class SIMDVec_u :
    public SIMDVecUnsignedInterface<
        SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN>,
        SCALAR_UINT_TYPE,
        VEC_LEN,
        typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::MASK_TYPE,
        typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::SWIZZLE_MASK_TYPE>,
    public SIMDVecPackableInterface<
        SIMDVec_u<SCALAR_UINT_TYPE, VEC_LEN>,
        typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::HALF_LEN_VEC_TYPE>
{
public:
    // Convenience aliases re-exported from the trait table for this
    // <scalar, length> configuration.
    typedef SIMDVecEmuRegister<SCALAR_UINT_TYPE, VEC_LEN> VEC_EMU_REG;
    typedef typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::SCALAR_INT_TYPE SCALAR_INT_TYPE;
    typedef typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::SCALAR_FLOAT_TYPE SCALAR_FLOAT_TYPE;
    typedef typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::MASK_TYPE MASK_TYPE;
    typedef typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::SCALAR_UINT_LOWER_PRECISION SCALAR_UINT_LOWER_PRECISION;
    typedef typename SIMDVec_u_traits<SCALAR_UINT_TYPE, VEC_LEN>::SCALAR_UINT_HIGHER_PRECISION SCALAR_UINT_HIGHER_PRECISION;
    // Conversion operators require access to private members.
    friend class SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN>;
    friend class SIMDVec_f<SCALAR_FLOAT_TYPE, VEC_LEN>;
public:
    // Natural alignment of the whole register: LEN elements of the scalar type.
    constexpr static uint32_t alignment() { return VEC_LEN*sizeof(SCALAR_UINT_TYPE); }
public: // private:
    // This is the only data member and it is a low level representation of vector register.
    alignas(alignment()) SCALAR_UINT_TYPE mVec[VEC_LEN];
public:
    // ZERO-CONSTR - value-initializes every lane to 0.
    UME_FORCE_INLINE SIMDVec_u() : mVec() {};
    // SET-CONSTR - broadcast the scalar 'x' to every lane.
    UME_FORCE_INLINE SIMDVec_u(SCALAR_UINT_TYPE x) {
        // Local pointer + '#pragma omp simd' nudges the compiler to
        // vectorize this scalar-emulation loop.
        SCALAR_UINT_TYPE *local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for (unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] = x;
        }
    }
    // This constructor is used to force types other than SCALAR_TYPES
    // to be promoted to SCALAR_TYPE instead of SCALAR_TYPE*. This prevents
    // ambiguity between SET-CONSTR and LOAD-CONSTR.
    template<typename T>
    UME_FORCE_INLINE SIMDVec_u(
        T i,
        typename std::enable_if< (std::is_fundamental<T>::value) &&
                                 !std::is_same<T, SCALAR_UINT_TYPE>::value,
                                 void*>::type = nullptr)
    : SIMDVec_u(static_cast<SCALAR_UINT_TYPE>(i)) {}
    // LOAD-CONSTR - Construct by loading from memory
    UME_FORCE_INLINE explicit SIMDVec_u(SCALAR_UINT_TYPE const * p) { this->load(p); }
    // FULL-CONSTR variants: one element-wise constructor per supported
    // VEC_LEN (2..128). Being members of a template, only the overload
    // actually called for a given VEC_LEN is ever instantiated.
    UME_FORCE_INLINE SIMDVec_u(SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1) {
        insert(0, i0); insert(1, i1);
    }
    UME_FORCE_INLINE SIMDVec_u(SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1, SCALAR_UINT_TYPE i2, SCALAR_UINT_TYPE i3) {
        insert(0, i0); insert(1, i1); insert(2, i2); insert(3, i3);
    }
    UME_FORCE_INLINE SIMDVec_u(SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1, SCALAR_UINT_TYPE i2, SCALAR_UINT_TYPE i3, SCALAR_UINT_TYPE i4, SCALAR_UINT_TYPE i5, SCALAR_UINT_TYPE i6, SCALAR_UINT_TYPE i7)
    {
        insert(0, i0); insert(1, i1); insert(2, i2); insert(3, i3);
        insert(4, i4); insert(5, i5); insert(6, i6); insert(7, i7);
    }
    UME_FORCE_INLINE SIMDVec_u(SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1, SCALAR_UINT_TYPE i2, SCALAR_UINT_TYPE i3, SCALAR_UINT_TYPE i4, SCALAR_UINT_TYPE i5, SCALAR_UINT_TYPE i6, SCALAR_UINT_TYPE i7,
                    SCALAR_UINT_TYPE i8, SCALAR_UINT_TYPE i9, SCALAR_UINT_TYPE i10, SCALAR_UINT_TYPE i11, SCALAR_UINT_TYPE i12, SCALAR_UINT_TYPE i13, SCALAR_UINT_TYPE i14, SCALAR_UINT_TYPE i15)
    {
        insert(0, i0); insert(1, i1); insert(2, i2); insert(3, i3);
        insert(4, i4); insert(5, i5); insert(6, i6); insert(7, i7);
        insert(8, i8); insert(9, i9); insert(10, i10); insert(11, i11);
        insert(12, i12); insert(13, i13); insert(14, i14); insert(15, i15);
    }
    UME_FORCE_INLINE SIMDVec_u(SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1, SCALAR_UINT_TYPE i2, SCALAR_UINT_TYPE i3, SCALAR_UINT_TYPE i4, SCALAR_UINT_TYPE i5, SCALAR_UINT_TYPE i6, SCALAR_UINT_TYPE i7,
                    SCALAR_UINT_TYPE i8, SCALAR_UINT_TYPE i9, SCALAR_UINT_TYPE i10, SCALAR_UINT_TYPE i11, SCALAR_UINT_TYPE i12, SCALAR_UINT_TYPE i13, SCALAR_UINT_TYPE i14, SCALAR_UINT_TYPE i15,
                    SCALAR_UINT_TYPE i16, SCALAR_UINT_TYPE i17, SCALAR_UINT_TYPE i18, SCALAR_UINT_TYPE i19, SCALAR_UINT_TYPE i20, SCALAR_UINT_TYPE i21, SCALAR_UINT_TYPE i22, SCALAR_UINT_TYPE i23,
                    SCALAR_UINT_TYPE i24, SCALAR_UINT_TYPE i25, SCALAR_UINT_TYPE i26, SCALAR_UINT_TYPE i27, SCALAR_UINT_TYPE i28, SCALAR_UINT_TYPE i29, SCALAR_UINT_TYPE i30, SCALAR_UINT_TYPE i31)
    {
        insert(0, i0); insert(1, i1); insert(2, i2); insert(3, i3);
        insert(4, i4); insert(5, i5); insert(6, i6); insert(7, i7);
        insert(8, i8); insert(9, i9); insert(10, i10); insert(11, i11);
        insert(12, i12); insert(13, i13); insert(14, i14); insert(15, i15);
        insert(16, i16); insert(17, i17); insert(18, i18); insert(19, i19);
        insert(20, i20); insert(21, i21); insert(22, i22); insert(23, i23);
        insert(24, i24); insert(25, i25); insert(26, i26); insert(27, i27);
        insert(28, i28); insert(29, i29); insert(30, i30); insert(31, i31);
    }
    UME_FORCE_INLINE SIMDVec_u(
        SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1, SCALAR_UINT_TYPE i2, SCALAR_UINT_TYPE i3, SCALAR_UINT_TYPE i4, SCALAR_UINT_TYPE i5, SCALAR_UINT_TYPE i6, SCALAR_UINT_TYPE i7,
        SCALAR_UINT_TYPE i8, SCALAR_UINT_TYPE i9, SCALAR_UINT_TYPE i10, SCALAR_UINT_TYPE i11, SCALAR_UINT_TYPE i12, SCALAR_UINT_TYPE i13, SCALAR_UINT_TYPE i14, SCALAR_UINT_TYPE i15,
        SCALAR_UINT_TYPE i16, SCALAR_UINT_TYPE i17, SCALAR_UINT_TYPE i18, SCALAR_UINT_TYPE i19, SCALAR_UINT_TYPE i20, SCALAR_UINT_TYPE i21, SCALAR_UINT_TYPE i22, SCALAR_UINT_TYPE i23,
        SCALAR_UINT_TYPE i24, SCALAR_UINT_TYPE i25, SCALAR_UINT_TYPE i26, SCALAR_UINT_TYPE i27, SCALAR_UINT_TYPE i28, SCALAR_UINT_TYPE i29, SCALAR_UINT_TYPE i30, SCALAR_UINT_TYPE i31,
        SCALAR_UINT_TYPE i32, SCALAR_UINT_TYPE i33, SCALAR_UINT_TYPE i34, SCALAR_UINT_TYPE i35, SCALAR_UINT_TYPE i36, SCALAR_UINT_TYPE i37, SCALAR_UINT_TYPE i38, SCALAR_UINT_TYPE i39,
        SCALAR_UINT_TYPE i40, SCALAR_UINT_TYPE i41, SCALAR_UINT_TYPE i42, SCALAR_UINT_TYPE i43, SCALAR_UINT_TYPE i44, SCALAR_UINT_TYPE i45, SCALAR_UINT_TYPE i46, SCALAR_UINT_TYPE i47,
        SCALAR_UINT_TYPE i48, SCALAR_UINT_TYPE i49, SCALAR_UINT_TYPE i50, SCALAR_UINT_TYPE i51, SCALAR_UINT_TYPE i52, SCALAR_UINT_TYPE i53, SCALAR_UINT_TYPE i54, SCALAR_UINT_TYPE i55,
        SCALAR_UINT_TYPE i56, SCALAR_UINT_TYPE i57, SCALAR_UINT_TYPE i58, SCALAR_UINT_TYPE i59, SCALAR_UINT_TYPE i60, SCALAR_UINT_TYPE i61, SCALAR_UINT_TYPE i62, SCALAR_UINT_TYPE i63)
    {
        insert(0, i0); insert(1, i1); insert(2, i2); insert(3, i3);
        insert(4, i4); insert(5, i5); insert(6, i6); insert(7, i7);
        insert(8, i8); insert(9, i9); insert(10, i10); insert(11, i11);
        insert(12, i12); insert(13, i13); insert(14, i14); insert(15, i15);
        insert(16, i16); insert(17, i17); insert(18, i18); insert(19, i19);
        insert(20, i20); insert(21, i21); insert(22, i22); insert(23, i23);
        insert(24, i24); insert(25, i25); insert(26, i26); insert(27, i27);
        insert(28, i28); insert(29, i29); insert(30, i30); insert(31, i31);
        insert(32, i32); insert(33, i33); insert(34, i34); insert(35, i35);
        insert(36, i36); insert(37, i37); insert(38, i38); insert(39, i39);
        insert(40, i40); insert(41, i41); insert(42, i42); insert(43, i43);
        insert(44, i44); insert(45, i45); insert(46, i46); insert(47, i47);
        insert(48, i48); insert(49, i49); insert(50, i50); insert(51, i51);
        insert(52, i52); insert(53, i53); insert(54, i54); insert(55, i55);
        insert(56, i56); insert(57, i57); insert(58, i58); insert(59, i59);
        insert(60, i60); insert(61, i61); insert(62, i62); insert(63, i63);
    }
    UME_FORCE_INLINE SIMDVec_u(
        SCALAR_UINT_TYPE i0, SCALAR_UINT_TYPE i1, SCALAR_UINT_TYPE i2, SCALAR_UINT_TYPE i3, SCALAR_UINT_TYPE i4, SCALAR_UINT_TYPE i5, SCALAR_UINT_TYPE i6, SCALAR_UINT_TYPE i7,
        SCALAR_UINT_TYPE i8, SCALAR_UINT_TYPE i9, SCALAR_UINT_TYPE i10, SCALAR_UINT_TYPE i11, SCALAR_UINT_TYPE i12, SCALAR_UINT_TYPE i13, SCALAR_UINT_TYPE i14, SCALAR_UINT_TYPE i15,
        SCALAR_UINT_TYPE i16, SCALAR_UINT_TYPE i17, SCALAR_UINT_TYPE i18, SCALAR_UINT_TYPE i19, SCALAR_UINT_TYPE i20, SCALAR_UINT_TYPE i21, SCALAR_UINT_TYPE i22, SCALAR_UINT_TYPE i23,
        SCALAR_UINT_TYPE i24, SCALAR_UINT_TYPE i25, SCALAR_UINT_TYPE i26, SCALAR_UINT_TYPE i27, SCALAR_UINT_TYPE i28, SCALAR_UINT_TYPE i29, SCALAR_UINT_TYPE i30, SCALAR_UINT_TYPE i31,
        SCALAR_UINT_TYPE i32, SCALAR_UINT_TYPE i33, SCALAR_UINT_TYPE i34, SCALAR_UINT_TYPE i35, SCALAR_UINT_TYPE i36, SCALAR_UINT_TYPE i37, SCALAR_UINT_TYPE i38, SCALAR_UINT_TYPE i39,
        SCALAR_UINT_TYPE i40, SCALAR_UINT_TYPE i41, SCALAR_UINT_TYPE i42, SCALAR_UINT_TYPE i43, SCALAR_UINT_TYPE i44, SCALAR_UINT_TYPE i45, SCALAR_UINT_TYPE i46, SCALAR_UINT_TYPE i47,
        SCALAR_UINT_TYPE i48, SCALAR_UINT_TYPE i49, SCALAR_UINT_TYPE i50, SCALAR_UINT_TYPE i51, SCALAR_UINT_TYPE i52, SCALAR_UINT_TYPE i53, SCALAR_UINT_TYPE i54, SCALAR_UINT_TYPE i55,
        SCALAR_UINT_TYPE i56, SCALAR_UINT_TYPE i57, SCALAR_UINT_TYPE i58, SCALAR_UINT_TYPE i59, SCALAR_UINT_TYPE i60, SCALAR_UINT_TYPE i61, SCALAR_UINT_TYPE i62, SCALAR_UINT_TYPE i63,
        SCALAR_UINT_TYPE i64, SCALAR_UINT_TYPE i65, SCALAR_UINT_TYPE i66, SCALAR_UINT_TYPE i67, SCALAR_UINT_TYPE i68, SCALAR_UINT_TYPE i69, SCALAR_UINT_TYPE i70, SCALAR_UINT_TYPE i71,
        SCALAR_UINT_TYPE i72, SCALAR_UINT_TYPE i73, SCALAR_UINT_TYPE i74, SCALAR_UINT_TYPE i75, SCALAR_UINT_TYPE i76, SCALAR_UINT_TYPE i77, SCALAR_UINT_TYPE i78, SCALAR_UINT_TYPE i79,
        SCALAR_UINT_TYPE i80, SCALAR_UINT_TYPE i81, SCALAR_UINT_TYPE i82, SCALAR_UINT_TYPE i83, SCALAR_UINT_TYPE i84, SCALAR_UINT_TYPE i85, SCALAR_UINT_TYPE i86, SCALAR_UINT_TYPE i87,
        SCALAR_UINT_TYPE i88, SCALAR_UINT_TYPE i89, SCALAR_UINT_TYPE i90, SCALAR_UINT_TYPE i91, SCALAR_UINT_TYPE i92, SCALAR_UINT_TYPE i93, SCALAR_UINT_TYPE i94, SCALAR_UINT_TYPE i95,
        SCALAR_UINT_TYPE i96, SCALAR_UINT_TYPE i97, SCALAR_UINT_TYPE i98, SCALAR_UINT_TYPE i99, SCALAR_UINT_TYPE i100, SCALAR_UINT_TYPE i101, SCALAR_UINT_TYPE i102, SCALAR_UINT_TYPE i103,
        SCALAR_UINT_TYPE i104, SCALAR_UINT_TYPE i105, SCALAR_UINT_TYPE i106, SCALAR_UINT_TYPE i107, SCALAR_UINT_TYPE i108, SCALAR_UINT_TYPE i109, SCALAR_UINT_TYPE i110, SCALAR_UINT_TYPE i111,
        SCALAR_UINT_TYPE i112, SCALAR_UINT_TYPE i113, SCALAR_UINT_TYPE i114, SCALAR_UINT_TYPE i115, SCALAR_UINT_TYPE i116, SCALAR_UINT_TYPE i117, SCALAR_UINT_TYPE i118, SCALAR_UINT_TYPE i119,
        SCALAR_UINT_TYPE i120, SCALAR_UINT_TYPE i121, SCALAR_UINT_TYPE i122, SCALAR_UINT_TYPE i123, SCALAR_UINT_TYPE i124, SCALAR_UINT_TYPE i125, SCALAR_UINT_TYPE i126, SCALAR_UINT_TYPE i127)
    {
        insert(0, i0); insert(1, i1); insert(2, i2); insert(3, i3);
        insert(4, i4); insert(5, i5); insert(6, i6); insert(7, i7);
        insert(8, i8); insert(9, i9); insert(10, i10); insert(11, i11);
        insert(12, i12); insert(13, i13); insert(14, i14); insert(15, i15);
        insert(16, i16); insert(17, i17); insert(18, i18); insert(19, i19);
        insert(20, i20); insert(21, i21); insert(22, i22); insert(23, i23);
        insert(24, i24); insert(25, i25); insert(26, i26); insert(27, i27);
        insert(28, i28); insert(29, i29); insert(30, i30); insert(31, i31);
        insert(32, i32); insert(33, i33); insert(34, i34); insert(35, i35);
        insert(36, i36); insert(37, i37); insert(38, i38); insert(39, i39);
        insert(40, i40); insert(41, i41); insert(42, i42); insert(43, i43);
        insert(44, i44); insert(45, i45); insert(46, i46); insert(47, i47);
        insert(48, i48); insert(49, i49); insert(50, i50); insert(51, i51);
        insert(52, i52); insert(53, i53); insert(54, i54); insert(55, i55);
        insert(56, i56); insert(57, i57); insert(58, i58); insert(59, i59);
        insert(60, i60); insert(61, i61); insert(62, i62); insert(63, i63);
        insert(64, i64); insert(65, i65); insert(66, i66); insert(67, i67);
        insert(68, i68); insert(69, i69); insert(70, i70); insert(71, i71);
        insert(72, i72); insert(73, i73); insert(74, i74); insert(75, i75);
        insert(76, i76); insert(77, i77); insert(78, i78); insert(79, i79);
        insert(80, i80); insert(81, i81); insert(82, i82); insert(83, i83);
        insert(84, i84); insert(85, i85); insert(86, i86); insert(87, i87);
        insert(88, i88); insert(89, i89); insert(90, i90); insert(91, i91);
        insert(92, i92); insert(93, i93); insert(94, i94); insert(95, i95);
        insert(96, i96); insert(97, i97); insert(98, i98); insert(99, i99);
        insert(100, i100); insert(101, i101); insert(102, i102); insert(103, i103);
        insert(104, i104); insert(105, i105); insert(106, i106); insert(107, i107);
        insert(108, i108); insert(109, i109); insert(110, i110); insert(111, i111);
        insert(112, i112); insert(113, i113); insert(114, i114); insert(115, i115);
        insert(116, i116); insert(117, i117); insert(118, i118); insert(119, i119);
        insert(120, i120); insert(121, i121); insert(122, i122); insert(123, i123);
        insert(124, i124); insert(125, i125); insert(126, i126); insert(127, i127);
    }
    // EXTRACT
    // Read the element at 'index'. No bounds checking is performed.
    UME_FORCE_INLINE SCALAR_UINT_TYPE extract(uint32_t index) const {
        return mVec[index];
    }
    UME_FORCE_INLINE SCALAR_UINT_TYPE operator[] (uint32_t index) const {
        return extract(index);
    }
    // INSERT
    // Overwrite the element at 'index'. No bounds checking is performed.
    UME_FORCE_INLINE SIMDVec_u & insert(uint32_t index, SCALAR_UINT_TYPE value) {
        mVec[index] = value;
        return *this;
    }
    // Non-const subscript returns a proxy so that 'v[i] = x' routes through
    // insert() rather than exposing a raw reference into mVec.
    UME_FORCE_INLINE IntermediateIndex<SIMDVec_u, SCALAR_UINT_TYPE> operator[] (uint32_t index) {
        return IntermediateIndex<SIMDVec_u, SCALAR_UINT_TYPE>(index, static_cast<SIMDVec_u &>(*this));
    }
    // Override Mask Access operators
    // Subscripting (or calling, depending on build configuration) with a mask
    // yields a proxy enabling masked-assignment syntax such as 'v[mask] = x'.
#if defined(USE_PARENTHESES_IN_MASK_ASSIGNMENT)
    UME_FORCE_INLINE IntermediateMask<SIMDVec_u, SCALAR_UINT_TYPE, MASK_TYPE> operator() (MASK_TYPE const & mask) {
        return IntermediateMask<SIMDVec_u, SCALAR_UINT_TYPE, MASK_TYPE>(mask, static_cast<SIMDVec_u &>(*this));
    }
#else
    UME_FORCE_INLINE IntermediateMask<SIMDVec_u, SCALAR_UINT_TYPE, MASK_TYPE> operator[] (MASK_TYPE const & mask) {
        return IntermediateMask<SIMDVec_u, SCALAR_UINT_TYPE, MASK_TYPE>(mask, static_cast<SIMDVec_u &>(*this));
    }
#endif
    // ASSIGNV
    // Copy every lane from 'src' into this vector.
    // Throughout this class the pattern of raw local pointers plus
    // '#pragma omp simd safelen(VEC_LEN)' is used to encourage the compiler
    // to vectorize the scalar-emulation loops.
    UME_FORCE_INLINE SIMDVec_u & assign(SIMDVec_u const & src) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_src_ptr = &src.mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] = local_src_ptr[i];
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator= (SIMDVec_u const & b) {
        return this->assign(b);
    }
    // MASSIGNV
    // Masked copy: only lanes where mask[i] is true are overwritten.
    UME_FORCE_INLINE SIMDVec_u & assign(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & src) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_src_ptr = &src.mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if (local_mask_ptr[i] == true) local_ptr[i] = local_src_ptr[i];
        }
        return *this;
    }
    // ASSIGNS
    // Broadcast-assign the scalar 'b' to every lane.
    UME_FORCE_INLINE SIMDVec_u & assign(SCALAR_UINT_TYPE b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] = b;
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator= (SCALAR_UINT_TYPE b) {
        return this->assign(b);
    }
    // MASSIGNS
    // Masked broadcast-assign: only lanes where mask[i] is true receive 'b'.
    UME_FORCE_INLINE SIMDVec_u & assign(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if (local_mask_ptr[i] == true) local_ptr[i] = b;
        }
        return *this;
    }
    // PREFETCH0
    // PREFETCH1
    // PREFETCH2
    // LOAD
    // Fill the vector with VEC_LEN consecutive elements read from 'p'.
    UME_FORCE_INLINE SIMDVec_u & load(SCALAR_UINT_TYPE const *p) {
        SCALAR_UINT_TYPE *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const *local_p_ptr = &p[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] = local_p_ptr[i];
        }
        return *this;
    }
    // MLOAD
    // Masked load: only lanes where mask[i] is true are read from 'p'.
    UME_FORCE_INLINE SIMDVec_u & load(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE const *p) {
        SCALAR_UINT_TYPE *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const *local_p_ptr = &p[0];
        bool const *local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if (local_mask_ptr[i] == true) local_ptr[i] = local_p_ptr[i];
        }
        return *this;
    }
    // LOADA
    // Aligned load. In this scalar-emulation plugin the body is identical to
    // load(); the alignment contract is not checked or exploited here.
    UME_FORCE_INLINE SIMDVec_u & loada(SCALAR_UINT_TYPE const *p) {
        SCALAR_UINT_TYPE *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const *local_p_ptr = &p[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] = local_p_ptr[i];
        }
        return *this;
    }
    // MLOADA
    // Masked aligned load (alignment not enforced in emulation).
    UME_FORCE_INLINE SIMDVec_u & loada(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE const *p) {
        SCALAR_UINT_TYPE *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const *local_p_ptr = &p[0];
        bool const *local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if (local_mask_ptr[i] == true) local_ptr[i] = local_p_ptr[i];
        }
        return *this;
    }
    // STORE
    // Write all VEC_LEN elements to 'p'; returns 'p' for chaining.
    UME_FORCE_INLINE SCALAR_UINT_TYPE* store(SCALAR_UINT_TYPE* p) const {
        SCALAR_UINT_TYPE const *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE *local_p_ptr = &p[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_p_ptr[i] = local_ptr[i];
        }
        return p;
    }
    // MSTORE
    // Masked store: only lanes where mask[i] is true are written to 'p'.
    UME_FORCE_INLINE SCALAR_UINT_TYPE* store(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE* p) const {
        SCALAR_UINT_TYPE const *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE *local_p_ptr = &p[0];
        bool const *local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if (local_mask_ptr[i] == true) local_p_ptr[i] = local_ptr[i];
        }
        return p;
    }
    // STOREA
    // Aligned store. Body is identical to store(); the alignment contract is
    // not checked or exploited in this scalar-emulation plugin.
    UME_FORCE_INLINE SCALAR_UINT_TYPE* storea(SCALAR_UINT_TYPE* p) const {
        SCALAR_UINT_TYPE const *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE *local_p_ptr = &p[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_p_ptr[i] = local_ptr[i];
        }
        return p;
    }
    // MSTOREA
    // Masked aligned store (alignment not enforced in emulation).
    UME_FORCE_INLINE SCALAR_UINT_TYPE* storea(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE* p) const {
        SCALAR_UINT_TYPE const *local_ptr = &mVec[0];
        SCALAR_UINT_TYPE *local_p_ptr = &p[0];
        bool const *local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if (local_mask_ptr[i] == true) local_p_ptr[i] = local_ptr[i];
        }
        return p;
    }
// BLENDV
UME_FORCE_INLINE SIMDVec_u blend(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE *retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const *local_ptr = &mVec[0];
SCALAR_UINT_TYPE const *local_b_ptr = &b.mVec[0];
bool const *local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) retval_ptr[i] = local_b_ptr[i];
else retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BLENDS
UME_FORCE_INLINE SIMDVec_u blend(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE const *local_ptr = &mVec[0];
SCALAR_UINT_TYPE *retval_ptr = &retval.mVec[0];
bool const *local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) retval_ptr[i] = b;
else retval_ptr[i] = local_ptr[i];
}
return retval;
}
    // SWIZZLE
    // SWIZZLEA
    // ADDV
    // Lane-wise addition; returns a new vector (unsigned wrap-around on
    // overflow, per C++ unsigned arithmetic).
    UME_FORCE_INLINE SIMDVec_u add(SIMDVec_u const & b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_retval_ptr[i] = local_ptr[i] + local_b_ptr[i];
        }
        return retval;
    }
    UME_FORCE_INLINE SIMDVec_u operator+ (SIMDVec_u const & b) const {
        return add(b);
    }
    // MADDV
    // Masked lane-wise addition; inactive lanes copy this vector's value.
    UME_FORCE_INLINE SIMDVec_u add(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] + local_b_ptr[i];
            else local_retval_ptr[i] = local_ptr[i];
        }
        return retval;
    }
    // ADDS
    // Add the scalar 'b' to every lane; returns a new vector.
    UME_FORCE_INLINE SIMDVec_u add(SCALAR_UINT_TYPE b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_retval_ptr[i] = local_ptr[i] + b;
        }
        return retval;
    }
    UME_FORCE_INLINE SIMDVec_u operator+ (SCALAR_UINT_TYPE b) const {
        return add(b);
    }
    // MADDS
    // Masked scalar addition; inactive lanes copy this vector's value.
    UME_FORCE_INLINE SIMDVec_u add(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] + b;
            else local_retval_ptr[i] = local_ptr[i];
        }
        return retval;
    }
    // ADDVA
    // In-place lane-wise addition.
    UME_FORCE_INLINE SIMDVec_u & adda(SIMDVec_u const & b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] += local_b_ptr[i];
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator+= (SIMDVec_u const & b) {
        return adda(b);
    }
    // MADDVA
    // Masked in-place lane-wise addition; inactive lanes are untouched.
    UME_FORCE_INLINE SIMDVec_u & adda(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_ptr[i] += local_b_ptr[i];
        }
        return *this;
    }
    // ADDSA
    // In-place addition of scalar 'b' to every lane.
    UME_FORCE_INLINE SIMDVec_u & adda(SCALAR_UINT_TYPE b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] += b;
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator+= (SCALAR_UINT_TYPE b) {
        return adda(b);
    }
    // MADDSA
    // Masked in-place scalar addition; inactive lanes are untouched.
    UME_FORCE_INLINE SIMDVec_u & adda(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_ptr[i] += b;
        }
        return *this;
    }
    // Saturating-add variants below are not overridden here; the generic
    // interface implementations are used instead.
    // SADDV
    // MSADDV
    // SADDS
    // MSADDS
    // SADDVA
    // MSADDVA
    // SADDSA
    // MSADDSA
    // POSTINC
    // Post-increment: returns a copy of the original values, then every lane
    // is incremented by 1.
    UME_FORCE_INLINE SIMDVec_u postinc() {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_retval_ptr[i] = local_ptr[i]++;
        }
        return retval;
    }
    UME_FORCE_INLINE SIMDVec_u operator++ (int) {
        return postinc();
    }
    // MPOSTINC
    // Masked post-increment: only lanes where mask[i] is true are
    // incremented; the returned vector holds the pre-increment values.
    UME_FORCE_INLINE SIMDVec_u postinc(SIMDVecMask<VEC_LEN> const & mask) {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i]++;
            else local_retval_ptr[i] = local_ptr[i];
        }
        return retval;
    }
    // PREFINC
    // Pre-increment: increments every lane by 1, returns *this.
    UME_FORCE_INLINE SIMDVec_u & prefinc() {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            ++local_ptr[i];
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator++ () {
        return prefinc();
    }
    // MPREFINC
    // Masked pre-increment: only lanes where mask[i] is true are incremented.
    UME_FORCE_INLINE SIMDVec_u & prefinc(SIMDVecMask<VEC_LEN> const & mask) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) ++local_ptr[i];
        }
        return *this;
    }
    // SUBV
    // Lane-wise subtraction (this - b); returns a new vector. Unsigned
    // wrap-around applies when b[i] > this[i].
    UME_FORCE_INLINE SIMDVec_u sub(SIMDVec_u const & b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_retval_ptr[i] = local_ptr[i] - local_b_ptr[i];
        }
        return retval;
    }
    UME_FORCE_INLINE SIMDVec_u operator- (SIMDVec_u const & b) const {
        return sub(b);
    }
    // MSUBV
    // Masked lane-wise subtraction; inactive lanes copy this vector's value.
    UME_FORCE_INLINE SIMDVec_u sub(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] - local_b_ptr[i];
            else local_retval_ptr[i] = local_ptr[i];
        }
        return retval;
    }
    // SUBS
    // Subtract the scalar 'b' from every lane; returns a new vector.
    UME_FORCE_INLINE SIMDVec_u sub(SCALAR_UINT_TYPE b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_retval_ptr[i] = local_ptr[i] - b;
        }
        return retval;
    }
    UME_FORCE_INLINE SIMDVec_u operator- (SCALAR_UINT_TYPE b) const {
        return sub(b);
    }
    // MSUBS
    // Masked scalar subtraction; inactive lanes copy this vector's value.
    UME_FORCE_INLINE SIMDVec_u sub(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
        SIMDVec_u retval;
        SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
        SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] - b;
            else local_retval_ptr[i] = local_ptr[i];
        }
        return retval;
    }
    // SUBVA
    // In-place lane-wise subtraction.
    UME_FORCE_INLINE SIMDVec_u & suba(SIMDVec_u const & b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] -= local_b_ptr[i];
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator-= (SIMDVec_u const & b) {
        return suba(b);
    }
    // MSUBVA
    // Masked in-place lane-wise subtraction; inactive lanes are untouched.
    UME_FORCE_INLINE SIMDVec_u & suba(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_ptr[i] -= local_b_ptr[i];
        }
        return *this;
    }
    // SUBSA
    // In-place subtraction of scalar 'b' from every lane.
    UME_FORCE_INLINE SIMDVec_u & suba(SCALAR_UINT_TYPE b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            local_ptr[i] -= b;
        }
        return *this;
    }
    UME_FORCE_INLINE SIMDVec_u & operator-= (SCALAR_UINT_TYPE b) {
        return suba(b);
    }
    // MSUBSA
    // Masked in-place scalar subtraction; inactive lanes are untouched.
    UME_FORCE_INLINE SIMDVec_u & suba(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
        SCALAR_UINT_TYPE * local_ptr = &mVec[0];
        bool const * local_mask_ptr = &mask.mMask[0];
        #pragma omp simd safelen(VEC_LEN)
        for(unsigned int i = 0; i < VEC_LEN; i++) {
            if(local_mask_ptr[i] == true) local_ptr[i] -= b;
        }
        return *this;
    }
    // Saturating-subtract variants below are not overridden here; the generic
    // interface implementations are used instead.
    // SSUBV
    // MSSUBV
    // SSUBS
    // MSSUBS
    // SSUBVA
    // MSSUBVA
    // SSUBSA
    // MSSUBSA
// SUBFROMV
// Reversed vector subtraction: returns b.mVec[i] - mVec[i] per lane
// (this vector is the subtrahend, not the minuend).
UME_FORCE_INLINE SIMDVec_u subfrom(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_b_ptr[i] - local_ptr[i];
}
return retval;
}
// MSUBFROMV
// Masked reversed vector subtraction; masked-off lanes take b's element
// (the minuend), not this vector's.
UME_FORCE_INLINE SIMDVec_u subfrom(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_b_ptr[i] - local_ptr[i];
else local_retval_ptr[i] = local_b_ptr[i];
}
return retval;
}
// SUBFROMS
// Reversed scalar subtraction: returns b - mVec[i] per lane.
UME_FORCE_INLINE SIMDVec_u subfrom(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = b - local_ptr[i];
}
return retval;
}
// MSUBFROMS
// Masked reversed scalar subtraction; masked-off lanes take the scalar b.
UME_FORCE_INLINE SIMDVec_u subfrom(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = b - local_ptr[i];
else local_retval_ptr[i] = b;
}
return retval;
}
// SUBFROMVA
// In-place reversed vector subtraction: mVec[i] = b.mVec[i] - mVec[i].
UME_FORCE_INLINE SIMDVec_u & subfroma(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] = local_b_ptr[i] - local_ptr[i];
}
return *this;
}
// MSUBFROMVA
// Masked variant; note masked-off lanes are OVERWRITTEN with b's element
// (matching MSUBFROMV semantics), not left unchanged.
UME_FORCE_INLINE SIMDVec_u & subfroma(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] = local_b_ptr[i] - local_ptr[i];
else local_ptr[i] = local_b_ptr[i];
}
return *this;
}
// SUBFROMSA
// In-place reversed scalar subtraction: mVec[i] = b - mVec[i].
UME_FORCE_INLINE SIMDVec_u & subfroma(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] = b - local_ptr[i];
}
return *this;
}
// MSUBFROMSA
// Masked variant; masked-off lanes are overwritten with the scalar b.
UME_FORCE_INLINE SIMDVec_u & subfroma(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] = b - local_ptr[i];
else local_ptr[i] = b;
}
return *this;
}
// POSTDEC
// Post-decrement: returns a copy of the current values, then decrements
// every lane of this vector by one.
UME_FORCE_INLINE SIMDVec_u postdec() {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i]--;
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator-- (int) {
return postdec();
}
// MPOSTDEC
// Masked post-decrement: masked-off lanes are neither decremented nor
// altered in the returned copy.
UME_FORCE_INLINE SIMDVec_u postdec(SIMDVecMask<VEC_LEN> const & mask) {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i]--;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// PREFDEC
// Pre-decrement: decrements every lane and returns *this.
UME_FORCE_INLINE SIMDVec_u & prefdec() {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
--local_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator-- () {
return prefdec();
}
// MPREFDEC
// Masked pre-decrement: only lanes where mask is true are decremented.
UME_FORCE_INLINE SIMDVec_u & prefdec(SIMDVecMask<VEC_LEN> const & mask) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) --local_ptr[i];
}
return *this;
}
// MULV
// Per-lane vector multiplication: returns mVec[i] * b.mVec[i].
UME_FORCE_INLINE SIMDVec_u mul(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator* (SIMDVec_u const & b) const {
return mul(b);
}
// MMULV
// Masked vector multiplication; masked-off lanes pass this vector's
// element through unchanged.
UME_FORCE_INLINE SIMDVec_u mul(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MULS
// Per-lane scalar multiplication: returns mVec[i] * b.
UME_FORCE_INLINE SIMDVec_u mul(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] * b;
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator* (SCALAR_UINT_TYPE b) const {
return mul(b);
}
// MMULS
// Masked scalar multiplication; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u mul(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MULVA
// In-place vector multiplication: mVec[i] *= b.mVec[i].
UME_FORCE_INLINE SIMDVec_u & mula(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] *= local_b_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator*= (SIMDVec_u const & b) {
return mula(b);
}
// MMULVA
// Masked in-place vector multiplication: only lanes where mask is true are updated.
UME_FORCE_INLINE SIMDVec_u & mula(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] *= local_b_ptr[i];
}
return *this;
}
// MULSA
// In-place scalar multiplication: mVec[i] *= b.
UME_FORCE_INLINE SIMDVec_u & mula(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] *= b;
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator*= (SCALAR_UINT_TYPE b) {
return mula(b);
}
// MMULSA
// Masked in-place scalar multiplication: only lanes where mask is true are updated.
UME_FORCE_INLINE SIMDVec_u & mula(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] *= b;
}
return *this;
}
// DIVV
// Per-lane vector division: returns mVec[i] / b.mVec[i].
// NOTE(review): no zero-divisor guard anywhere in this family — a zero
// lane in b is the caller's responsibility (division by zero is UB).
UME_FORCE_INLINE SIMDVec_u div(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] / local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator/ (SIMDVec_u const & b) const {
return div(b);
}
// MDIVV
// Masked vector division; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u div(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] / local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// DIVS
// Per-lane scalar division: returns mVec[i] / b.
UME_FORCE_INLINE SIMDVec_u div(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] / b;
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator/ (SCALAR_UINT_TYPE b) const {
return div(b);
}
// MDIVS
// Masked scalar division; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u div(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] / b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// DIVVA
// In-place vector division: mVec[i] /= b.mVec[i] (no zero-divisor guard).
UME_FORCE_INLINE SIMDVec_u & diva(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] /= local_b_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator/= (SIMDVec_u const & b) {
return diva(b);
}
// MDIVVA
// Masked in-place vector division: only lanes where mask is true are updated.
UME_FORCE_INLINE SIMDVec_u & diva(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] /= local_b_ptr[i];
}
return *this;
}
// DIVSA
// In-place scalar division: mVec[i] /= b.
UME_FORCE_INLINE SIMDVec_u & diva(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] /= b;
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator/= (SCALAR_UINT_TYPE b) {
return diva(b);
}
// MDIVSA
// Masked in-place scalar division: only lanes where mask is true are updated.
UME_FORCE_INLINE SIMDVec_u & diva(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] /= b;
}
return *this;
}
// RCP
// Per-lane reciprocal: returns 1 / mVec[i].
// NOTE(review): the 1.0f literal converts to integral 1 for this unsigned
// element type, so integer lanes yield 0 for any value > 1 — presumably the
// generator shares this template with floating-point variants; confirm intent.
UME_FORCE_INLINE SIMDVec_u rcp() const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = SCALAR_UINT_TYPE(1.0f) / local_ptr[i];
}
return retval;
}
// MRCP
// Masked reciprocal; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u rcp(SIMDVecMask<VEC_LEN> const & mask) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = SCALAR_UINT_TYPE(1.0f) / local_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// RCPS
// Scaled reciprocal: returns b / mVec[i] per lane.
UME_FORCE_INLINE SIMDVec_u rcp(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = b / local_ptr[i];
}
return retval;
}
// MRCPS
// Masked scaled reciprocal; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u rcp(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = b / local_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// RCPA
// In-place reciprocal: mVec[i] = 1 / mVec[i] (see RCP note on the
// 1.0f literal converting to integral 1 for unsigned element types).
UME_FORCE_INLINE SIMDVec_u & rcpa() {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] = SCALAR_UINT_TYPE(1.0f) / local_ptr[i];
}
return *this;
}
// MRCPA
// Masked in-place reciprocal: only lanes where mask is true are updated.
UME_FORCE_INLINE SIMDVec_u & rcpa(SIMDVecMask<VEC_LEN> const & mask) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] = SCALAR_UINT_TYPE(1.0f) / local_ptr[i];
}
return *this;
}
// RCPSA
// In-place scaled reciprocal: mVec[i] = b / mVec[i].
UME_FORCE_INLINE SIMDVec_u & rcpa(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] = b / local_ptr[i];
}
return *this;
}
// MRCPSA
// Masked in-place scaled reciprocal: only lanes where mask is true are updated.
UME_FORCE_INLINE SIMDVec_u & rcpa(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] = b / local_ptr[i];
}
return *this;
}
// CMPEQV
// Per-lane equality comparison against another vector; returns a mask
// with true in each lane where the elements are equal.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpeq(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] == local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator== (SIMDVec_u const & b) const {
return cmpeq(b);
}
// CMPEQS
// Per-lane equality comparison against a scalar.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpeq(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] == b;
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator== (SCALAR_UINT_TYPE b) const {
return cmpeq(b);
}
// CMPNEV
// Per-lane inequality comparison against another vector.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpne(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] != local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator!= (SIMDVec_u const & b) const {
return cmpne(b);
}
// CMPNES
// Per-lane inequality comparison against a scalar.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpne(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] != b;
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator!= (SCALAR_UINT_TYPE b) const {
return cmpne(b);
}
// CMPGTV
// Per-lane greater-than comparison against another vector; returns a mask.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpgt(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] > local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator> (SIMDVec_u const & b) const {
return cmpgt(b);
}
// CMPGTS
// Per-lane greater-than comparison against a scalar.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpgt(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] > b;
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator> (SCALAR_UINT_TYPE b) const {
return cmpgt(b);
}
// CMPLTV
// Per-lane less-than comparison against another vector.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmplt(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] < local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator< (SIMDVec_u const & b) const {
return cmplt(b);
}
// CMPLTS
// Per-lane less-than comparison against a scalar.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmplt(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] < b;
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator< (SCALAR_UINT_TYPE b) const {
return cmplt(b);
}
// CMPGEV
// Per-lane greater-or-equal comparison against another vector; returns a mask.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpge(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] >= local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator>= (SIMDVec_u const & b) const {
return cmpge(b);
}
// CMPGES
// Per-lane greater-or-equal comparison against a scalar.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmpge(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] >= b;
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator>= (SCALAR_UINT_TYPE b) const {
return cmpge(b);
}
// CMPLEV
// Per-lane less-or-equal comparison against another vector.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmple(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] <= local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator<= (SIMDVec_u const & b) const {
return cmple(b);
}
// CMPLES
// Per-lane less-or-equal comparison against a scalar.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> cmple(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool * local_retval_ptr = &retval.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] <= b;
}
return retval;
}
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> operator<= (SCALAR_UINT_TYPE b) const {
return cmple(b);
}
// CMPEV
// Whole-vector equality: true iff every lane of this vector equals the
// corresponding lane of b.
UME_FORCE_INLINE bool cmpe(SIMDVec_u const & b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool retval = true;
// Fused the original compare loop and reduction loop into a single
// logical-AND reduction; removes the intermediate VEC_LEN bool buffer
// (misleadingly named local_mask_ptr despite being an array). Result
// is identical: the conjunction of all per-lane equalities.
#pragma omp simd reduction(&&:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval && (local_ptr[i] == local_b_ptr[i]);
}
return retval;
}
// CMPES
// Whole-vector equality against a scalar: true iff every lane equals b.
UME_FORCE_INLINE bool cmpe(SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool retval = true;
// Fused the original two-pass form (compare into a temporary bool array,
// then reduce) into one logical-AND reduction; behavior is unchanged.
#pragma omp simd reduction(&&:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval && (local_ptr[i] == b);
}
return retval;
}
// UNIQUE
// TODO
// HADD
// Horizontal add: returns the sum of all lanes.
// NOTE(review): identity written as SCALAR_UINT_TYPE(0.0f) — converts to
// integral 0 here; presumably shared with float-typed generated variants.
UME_FORCE_INLINE SCALAR_UINT_TYPE hadd() const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(0.0f);
#pragma omp simd reduction(+:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval + local_ptr[i];
}
return retval;
}
// MHADD
// Masked horizontal add: masked-off lanes contribute the additive
// identity (0) via a temporary masked copy, then a plain reduction runs.
UME_FORCE_INLINE SCALAR_UINT_TYPE hadd(SIMDVecMask<VEC_LEN> const & mask) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(0.0f);
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(0.0f);
}
#pragma omp simd reduction(+:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval + masked_copy[i];
}
return retval;
}
// HADDS
// Horizontal add with initial value: returns b + sum of all lanes.
UME_FORCE_INLINE SCALAR_UINT_TYPE hadd(SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd reduction(+:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval + local_ptr[i];
}
return retval;
}
// MHADDS
// Masked horizontal add with initial value b; masked-off lanes add 0.
UME_FORCE_INLINE SCALAR_UINT_TYPE hadd(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(0.0f);
}
#pragma omp simd reduction(+:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval + masked_copy[i];
}
return retval;
}
// HMUL
// Horizontal multiply: returns the product of all lanes (identity 1).
UME_FORCE_INLINE SCALAR_UINT_TYPE hmul() const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(1.0f);
#pragma omp simd reduction(*:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval * local_ptr[i];
}
return retval; }
// MHMUL
// Masked horizontal multiply: masked-off lanes contribute the
// multiplicative identity (1) via a temporary masked copy.
UME_FORCE_INLINE SCALAR_UINT_TYPE hmul(SIMDVecMask<VEC_LEN> const & mask) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(1.0f);
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(1.0f);
}
#pragma omp simd reduction(*:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval * masked_copy[i];
}
return retval;
}
// HMULS
// Horizontal multiply with initial value: returns b * product of all lanes.
UME_FORCE_INLINE SCALAR_UINT_TYPE hmul(SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd reduction(*:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval * local_ptr[i];
}
return retval;
}
// MHMULS
// Masked horizontal multiply with initial value b; masked-off lanes multiply by 1.
UME_FORCE_INLINE SCALAR_UINT_TYPE hmul(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(1.0f);
}
#pragma omp simd reduction(*:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval * masked_copy[i];
}
return retval;
}
// FMULADDV
// Fused multiply-add (emulated): returns mVec[i] * b.mVec[i] + c.mVec[i].
UME_FORCE_INLINE SIMDVec_u fmuladd(SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] + local_c_ptr[i];
}
return retval;
}
// MFMULADDV
// Masked fused multiply-add; masked-off lanes pass this vector's element through.
UME_FORCE_INLINE SIMDVec_u fmuladd(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] + local_c_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// FMULSUBV
// Fused multiply-subtract: returns mVec[i] * b.mVec[i] - c.mVec[i].
UME_FORCE_INLINE SIMDVec_u fmulsub(SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] - local_c_ptr[i];
}
return retval;
}
// MFMULSUBV
// Masked fused multiply-subtract; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u fmulsub(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] * local_b_ptr[i] - local_c_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// FADDMULV
// Fused add-multiply: returns (mVec[i] + b.mVec[i]) * c.mVec[i].
UME_FORCE_INLINE SIMDVec_u faddmul(SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = (local_ptr[i] + local_b_ptr[i]) * local_c_ptr[i];
}
return retval;
}
// MFADDMULV
// Masked fused add-multiply; masked-off lanes pass this vector's element through.
UME_FORCE_INLINE SIMDVec_u faddmul(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = (local_ptr[i] + local_b_ptr[i]) * local_c_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// FSUBMULV
// Fused subtract-multiply: returns (mVec[i] - b.mVec[i]) * c.mVec[i].
UME_FORCE_INLINE SIMDVec_u fsubmul(SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = (local_ptr[i] - local_b_ptr[i]) * local_c_ptr[i];
}
return retval;
}
// MFSUBMULV
// Masked fused subtract-multiply; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u fsubmul(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b, SIMDVec_u const & c) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
SCALAR_UINT_TYPE const * local_c_ptr = &c.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = (local_ptr[i] - local_b_ptr[i]) * local_c_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MAXV
// Per-lane maximum with another vector.
UME_FORCE_INLINE SIMDVec_u max(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_ptr[i] > local_b_ptr[i]) local_retval_ptr[i] = local_ptr[i];
else local_retval_ptr[i] = local_b_ptr[i];
}
return retval;
}
// MMAXV
// Masked per-lane maximum: b's element is taken only when the lane is
// active AND b is not smaller; otherwise this vector's element is kept
// (masked-off lanes therefore pass through unchanged).
UME_FORCE_INLINE SIMDVec_u max(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] > local_b_ptr[i];
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_retval_ptr[i] = local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MAXS
// Per-lane maximum with a scalar.
UME_FORCE_INLINE SIMDVec_u max(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_ptr[i] > b) local_retval_ptr[i] = local_ptr[i];
else local_retval_ptr[i] = b;
}
return retval;
}
// MMAXS
// Masked per-lane maximum with a scalar; masked-off lanes pass through unchanged.
UME_FORCE_INLINE SIMDVec_u max(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] > b;
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_retval_ptr[i] = b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MAXVA
// In-place per-lane maximum with another vector
// (<= keeps the same result as the strict > used by MAXV when lanes are equal).
UME_FORCE_INLINE SIMDVec_u & maxa(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_ptr[i] <= local_b_ptr[i]) local_ptr[i] = local_b_ptr[i];
}
return *this;
}
// MMAXVA
// Masked in-place per-lane maximum: only active lanes where b is not
// smaller are overwritten with b's element.
UME_FORCE_INLINE SIMDVec_u & maxa(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] > local_b_ptr[i];
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_ptr[i] = local_b_ptr[i];
}
return *this;
}
// MAXSA
// In-place per-lane maximum with a scalar.
UME_FORCE_INLINE SIMDVec_u & maxa(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_ptr[i] <= b) local_ptr[i] = b;
}
return *this;
}
// MMAXSA
// Masked in-place per-lane maximum with a scalar.
UME_FORCE_INLINE SIMDVec_u & maxa(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] > b;
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_ptr[i] = b;
}
return *this;
}
// MINV
// MINV: element-wise minimum of this vector and b.
UME_FORCE_INLINE SIMDVec_u min(SIMDVec_u const & b) const {
    SIMDVec_u result;
    SCALAR_UINT_TYPE * out = &result.mVec[0];
    SCALAR_UINT_TYPE const * lhs = &mVec[0];
    SCALAR_UINT_TYPE const * rhs = &b.mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        out[lane] = (lhs[lane] < rhs[lane]) ? lhs[lane] : rhs[lane];
    }
    return result;
}
// MMINV
UME_FORCE_INLINE SIMDVec_u min(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] < local_b_ptr[i];
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_retval_ptr[i] = local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MINS
UME_FORCE_INLINE SIMDVec_u min(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_ptr[i] < b) local_retval_ptr[i] = local_ptr[i];
else local_retval_ptr[i] = b;
}
return retval;
}
// MMINS
UME_FORCE_INLINE SIMDVec_u min(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] < b;
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_retval_ptr[i] = b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// MINVA
UME_FORCE_INLINE SIMDVec_u & mina(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_ptr[i] > local_b_ptr[i]) local_ptr[i] = local_b_ptr[i];
}
return *this;
}
// MMINVA
UME_FORCE_INLINE SIMDVec_u & mina(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] < local_b_ptr[i];
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_ptr[i] = local_b_ptr[i];
}
return *this;
}
// MINSA
// MINSA: in-place min with a scalar, this[i] = min(this[i], b).
UME_FORCE_INLINE SIMDVec_u & mina(SCALAR_UINT_TYPE b) {
    SCALAR_UINT_TYPE * data = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        data[lane] = (data[lane] > b) ? b : data[lane];
    }
    return *this;
}
// MMINSA
UME_FORCE_INLINE SIMDVec_u & mina(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
bool predicate = local_ptr[i] < b;
bool cond = local_mask_ptr[i] && !predicate;
if(cond) local_ptr[i] = b;
}
return *this;
}
// HMAX
// MHMAX
// IMAX
// MIMAX
// HMIN
// MHMIN
// IMIN
// MIMIN
// BANDV
// BANDV: element-wise bitwise AND of this vector and b.
UME_FORCE_INLINE SIMDVec_u band(SIMDVec_u const & b) const {
    SIMDVec_u result;
    SCALAR_UINT_TYPE * out = &result.mVec[0];
    SCALAR_UINT_TYPE const * lhs = &mVec[0];
    SCALAR_UINT_TYPE const * rhs = &b.mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        out[lane] = lhs[lane] & rhs[lane];
    }
    return result;
}
UME_FORCE_INLINE SIMDVec_u operator& (SIMDVec_u const & b) const {
return band(b);
}
// MBANDV
// MBANDV: masked element-wise bitwise AND. Enabled lanes get this[i] & b[i];
// disabled lanes pass this[i] through unchanged.
UME_FORCE_INLINE SIMDVec_u band(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
    SIMDVec_u retval;
    SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
    SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
    SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
    bool const * local_mask_ptr = &mask.mMask[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] & local_b_ptr[i];
        else local_retval_ptr[i] = local_ptr[i];
    }
    return retval;
}
// BANDS
UME_FORCE_INLINE SIMDVec_u band(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] & b;
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator& (SCALAR_UINT_TYPE b) const {
return band(b);
}
// MBANDS
UME_FORCE_INLINE SIMDVec_u band(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] & b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BANDVA
UME_FORCE_INLINE SIMDVec_u & banda(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] &= local_b_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator&= (SIMDVec_u const & b) {
return banda(b);
}
// MBANDVA
UME_FORCE_INLINE SIMDVec_u & banda(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] &= local_b_ptr[i];
}
return *this;
}
// BANDSA
// BANDSA: in-place bitwise AND with a scalar, this[i] &= b.
UME_FORCE_INLINE SIMDVec_u & banda(SCALAR_UINT_TYPE b) {
    SCALAR_UINT_TYPE * data = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        data[lane] = data[lane] & b;
    }
    return *this;
}
// Compound AND-assignment with a scalar: this[i] &= b for all lanes.
// BUG FIX: the parameter was declared `bool`, so any scalar argument was
// silently truncated to 0/1 before the bitwise AND, zeroing out every bit
// above bit 0. Take the full-width scalar type instead, consistent with
// operator|= (SCALAR_UINT_TYPE) and operator^= (SCALAR_UINT_TYPE) below.
// Backward compatible: a bool argument still converts implicitly to 0/1.
UME_FORCE_INLINE SIMDVec_u & operator&= (SCALAR_UINT_TYPE b) {
    return banda(b);
}
// MBANDSA
UME_FORCE_INLINE SIMDVec_u & banda(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] &= b;
}
return *this;
}
// BORV
UME_FORCE_INLINE SIMDVec_u bor(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] | local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator| (SIMDVec_u const & b) const {
return bor(b);
}
// MBORV
UME_FORCE_INLINE SIMDVec_u bor(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] | local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BORS
UME_FORCE_INLINE SIMDVec_u bor(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] | b;
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator| (SCALAR_UINT_TYPE b) const {
return bor(b);
}
// MBORS
UME_FORCE_INLINE SIMDVec_u bor(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] | b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BORVA
UME_FORCE_INLINE SIMDVec_u & bora(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] |= local_b_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator|= (SIMDVec_u const & b) {
return bora(b);
}
// MBORVA
UME_FORCE_INLINE SIMDVec_u & bora(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] |= local_b_ptr[i];
}
return *this;
}
// BORSA
// BORSA: in-place bitwise OR with a scalar, this[i] |= b.
UME_FORCE_INLINE SIMDVec_u & bora(SCALAR_UINT_TYPE b) {
    SCALAR_UINT_TYPE * data = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        data[lane] = data[lane] | b;
    }
    return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator|= (SCALAR_UINT_TYPE b) {
return bora(b);
}
// MBORSA
UME_FORCE_INLINE SIMDVec_u & bora(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] |= b;
}
return *this;
}
// BXORV
UME_FORCE_INLINE SIMDVec_u bxor(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] ^ local_b_ptr[i];
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator^ (SIMDVec_u const & b) const {
return bxor(b);
}
// MBXORV
UME_FORCE_INLINE SIMDVec_u bxor(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] ^ local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BXORS
UME_FORCE_INLINE SIMDVec_u bxor(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] ^ b;
}
return retval;
}
UME_FORCE_INLINE SIMDVec_u operator^ (SCALAR_UINT_TYPE b) const {
return bxor(b);
}
// MBXORS
UME_FORCE_INLINE SIMDVec_u bxor(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] ^ b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BXORVA
UME_FORCE_INLINE SIMDVec_u & bxora(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] ^= local_b_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator^= (SIMDVec_u const & b) {
return bxora(b);
}
// MBXORVA
UME_FORCE_INLINE SIMDVec_u & bxora(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] ^= local_b_ptr[i];
}
return *this;
}
// BXORSA
UME_FORCE_INLINE SIMDVec_u & bxora(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] ^= b;
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator^= (SCALAR_UINT_TYPE b) {
return bxora(b);
}
// MBXORSA
UME_FORCE_INLINE SIMDVec_u & bxora(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] ^= b;
}
return *this;
}
// BNOT
// BNOT: element-wise bitwise complement, result[i] = ~this[i].
UME_FORCE_INLINE SIMDVec_u bnot() const {
    SIMDVec_u result;
    SCALAR_UINT_TYPE * out = &result.mVec[0];
    SCALAR_UINT_TYPE const * in = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        out[lane] = ~in[lane];
    }
    return result;
}
UME_FORCE_INLINE SIMDVec_u operator~ () const {
return bnot();
}
// MBNOT
UME_FORCE_INLINE SIMDVec_u bnot(SIMDVecMask<VEC_LEN> const & mask) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = ~local_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// BNOTA
// BNOTA: in-place bitwise complement, this[i] = ~this[i].
UME_FORCE_INLINE SIMDVec_u & bnota() {
    SCALAR_UINT_TYPE * data = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        data[lane] = ~data[lane];
    }
    return *this;
}
// MBNOTA
UME_FORCE_INLINE SIMDVec_u & bnota(SIMDVecMask<VEC_LEN> const & mask) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] = ~local_ptr[i];
}
return *this;
}
// HBAND
// HBAND: horizontal AND-reduction of all lanes. The accumulator starts at
// all-ones (the identity of AND); the 64-bit constant truncates to the
// element width on narrower types.
UME_FORCE_INLINE SCALAR_UINT_TYPE hband() const {
    SCALAR_UINT_TYPE const * src = &mVec[0];
    SCALAR_UINT_TYPE acc = SCALAR_UINT_TYPE(0xFFFFFFFFFFFFFFFF);
    #pragma omp simd reduction(&:acc)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        acc = acc & src[lane];
    }
    return acc;
}
// MHBAND
// MHBAND: masked horizontal AND-reduction. Two passes: first build a copy in
// which masked-off lanes are replaced by all-ones (the AND identity, so they
// do not affect the result), then reduce that copy with AND.
UME_FORCE_INLINE SCALAR_UINT_TYPE hband(SIMDVecMask<VEC_LEN> const & mask) const {
    SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
    SCALAR_UINT_TYPE masked_copy[VEC_LEN];
    bool const * local_mask_ptr = &mask.mMask[0];
    // all-ones start value; truncates to the element width on narrow types
    SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(0xFFFFFFFFFFFFFFFF);
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
        else masked_copy[i] = SCALAR_UINT_TYPE(0xFFFFFFFFFFFFFFFF);
    }
    #pragma omp simd reduction(&:retval)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        retval = retval & masked_copy[i];
    }
    return retval;
}
// HBANDS
UME_FORCE_INLINE SCALAR_UINT_TYPE hband(SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd reduction(&:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval & local_ptr[i];
}
return retval;
}
// MHBANDS
UME_FORCE_INLINE SCALAR_UINT_TYPE hband(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(0xFFFFFFFFFFFFFFFF);
}
#pragma omp simd reduction(&:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval & masked_copy[i];
}
return retval;
}
// HBOR
UME_FORCE_INLINE SCALAR_UINT_TYPE hbor() const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(0);
#pragma omp simd reduction(|:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval | local_ptr[i];
}
return retval;
}
// MHBOR
// MHBOR: masked horizontal OR-reduction. Two passes: first build a copy in
// which masked-off lanes are replaced by 0 (the OR identity), then reduce
// that copy with OR.
UME_FORCE_INLINE SCALAR_UINT_TYPE hbor(SIMDVecMask<VEC_LEN> const & mask) const {
    SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
    SCALAR_UINT_TYPE masked_copy[VEC_LEN];
    bool const * local_mask_ptr = &mask.mMask[0];
    SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(0);
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
        else masked_copy[i] = SCALAR_UINT_TYPE(0);
    }
    // BUG FIX: the reduction clause said reduction(&:retval) while the loop
    // body performs OR. Under OpenMP the clause's operator determines the
    // private-copy initializer and the combine step, so mixing & with | gives
    // a wrong result when the loop is actually vectorized/partitioned. All
    // sibling reductions (MHBAND uses &, HBORS/MHBORS use |) match their
    // body operator; this one now does too.
    #pragma omp simd reduction(|:retval)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        retval = retval | masked_copy[i];
    }
    return retval;
}
// HBORS
UME_FORCE_INLINE SCALAR_UINT_TYPE hbor(SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd reduction(|:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval | local_ptr[i];
}
return retval;
}
// MHBORS
UME_FORCE_INLINE SCALAR_UINT_TYPE hbor(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(0);
}
#pragma omp simd reduction(|:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval | masked_copy[i];
}
return retval;
}
// HBXOR
// HBXOR: horizontal XOR-reduction of all lanes (accumulator identity is 0).
UME_FORCE_INLINE SCALAR_UINT_TYPE hbxor() const {
    SCALAR_UINT_TYPE const * src = &mVec[0];
    SCALAR_UINT_TYPE acc = SCALAR_UINT_TYPE(0);
    #pragma omp simd reduction(^:acc)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        acc = acc ^ src[lane];
    }
    return acc;
}
// MHBXOR
UME_FORCE_INLINE SCALAR_UINT_TYPE hbxor(SIMDVecMask<VEC_LEN> const & mask) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = SCALAR_UINT_TYPE(0);
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(0);
}
#pragma omp simd reduction(^:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval ^ masked_copy[i];
}
return retval;
}
// HBXORS
UME_FORCE_INLINE SCALAR_UINT_TYPE hbxor(SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd reduction(^:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval ^ local_ptr[i];
}
return retval;
}
// MHBXORS
UME_FORCE_INLINE SCALAR_UINT_TYPE hbxor(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE masked_copy[VEC_LEN];
bool const * local_mask_ptr = &mask.mMask[0];
SCALAR_UINT_TYPE retval = b;
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) masked_copy[i] = local_ptr[i];
else masked_copy[i] = SCALAR_UINT_TYPE(0);
}
#pragma omp simd reduction(^:retval)
for(unsigned int i = 0; i < VEC_LEN; i++) {
retval = retval ^ masked_copy[i];
}
return retval;
}
// REMV
// REMV: element-wise remainder, result[i] = this[i] % b[i].
// NOTE(review): there is no guard against a zero lane in b — integer
// division by zero is undefined behavior. Presumably callers guarantee
// nonzero divisors; confirm before relying on this with arbitrary input.
UME_FORCE_INLINE SIMDVec_u rem(SIMDVec_u const & b) const {
    SIMDVec_u retval;
    SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
    SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
    SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        local_retval_ptr[i] = local_ptr[i] % local_b_ptr[i];
    }
    return retval;
}
UME_FORCE_INLINE SIMDVec_u operator% (SIMDVec_u const & b) const {
return rem(b);
}
// MREMV
UME_FORCE_INLINE SIMDVec_u rem(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] % local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// REMS
// REMS: element-wise remainder by a scalar, result[i] = this[i] % b.
UME_FORCE_INLINE SIMDVec_u rem(SCALAR_UINT_TYPE b) const {
    SIMDVec_u result;
    SCALAR_UINT_TYPE * out = &result.mVec[0];
    SCALAR_UINT_TYPE const * in = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        out[lane] = in[lane] % b;
    }
    return result;
}
UME_FORCE_INLINE SIMDVec_u operator% (SCALAR_UINT_TYPE b) const {
return rem(b);
}
// MREMS
UME_FORCE_INLINE SIMDVec_u rem(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] % b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// REMVA
UME_FORCE_INLINE SIMDVec_u & rema(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] %= local_b_ptr[i];
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator%= (SIMDVec_u const & b) {
return rema(b);
}
// MREMVA
UME_FORCE_INLINE SIMDVec_u & rema(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] %= local_b_ptr[i];
}
return *this;
}
// REMSA
UME_FORCE_INLINE SIMDVec_u & rema(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] %= b;
}
return *this;
}
UME_FORCE_INLINE SIMDVec_u & operator%= (SCALAR_UINT_TYPE b) {
return rema(b);
}
// MREMSA
UME_FORCE_INLINE SIMDVec_u & rema(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] %= b;
}
return *this;
}
// LANDV
// LANDV: element-wise logical AND producing a mask; a lane is true when
// both operands are nonzero.
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> land(SIMDVec_u const & b) const {
    SIMDVecMask<VEC_LEN> result;
    bool * out = &result.mMask[0];
    SCALAR_UINT_TYPE const * lhs = &mVec[0];
    SCALAR_UINT_TYPE const * rhs = &b.mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        out[lane] = (lhs[lane] != 0) && (rhs[lane] != 0);
    }
    return result;
}
// LANDS
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> land(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
bool * local_retval_ptr = &retval.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] && b;
}
return retval;
}
// LORV
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> lor(SIMDVec_u const & b) const {
SIMDVecMask<VEC_LEN> retval;
bool * local_retval_ptr = &retval.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] || local_b_ptr[i];
}
return retval;
}
// LORS
UME_FORCE_INLINE SIMDVecMask<VEC_LEN> lor(SCALAR_UINT_TYPE b) const {
SIMDVecMask<VEC_LEN> retval;
bool * local_retval_ptr = &retval.mMask[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] || b;
}
return retval;
}
// GATHERS
// GATHERS: load each lane from baseAddr at the position given by the
// matching entry of the indices array.
UME_FORCE_INLINE SIMDVec_u & gather(SCALAR_UINT_TYPE const * baseAddr, SCALAR_UINT_TYPE const * indices) {
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        mVec[lane] = baseAddr[indices[lane]];
    }
    return *this;
}
// MGATHERS
UME_FORCE_INLINE SIMDVec_u & gather(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE const * baseAddr, SCALAR_UINT_TYPE const * indices) {
for(unsigned int i = 0; i < VEC_LEN; i++)
{
if(mask.mMask[i] == true) mVec[i] = baseAddr[indices[i]];
}
return *this;
}
// GATHERV
UME_FORCE_INLINE SIMDVec_u & gather(SCALAR_UINT_TYPE const * baseAddr, SIMDVec_u const & indices) {
for(unsigned int i = 0; i < VEC_LEN; i++)
{
mVec[i] = baseAddr[indices.mVec[i]];
}
return *this;
}
// MGATHERV
UME_FORCE_INLINE SIMDVec_u & gather(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE const * baseAddr, SIMDVec_u const & indices) {
for(unsigned int i = 0; i < VEC_LEN; i++)
{
if(mask.mMask[i] == true) mVec[i] = baseAddr[indices.mVec[i]];
}
return *this;
}
// SCATTERS
// SCATTERS: store each lane to baseAddr at the position given by the
// matching entry of the indices array; returns baseAddr for chaining.
UME_FORCE_INLINE SCALAR_UINT_TYPE* scatter(SCALAR_UINT_TYPE* baseAddr, SCALAR_UINT_TYPE* indices) const {
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        baseAddr[indices[lane]] = mVec[lane];
    }
    return baseAddr;
}
// MSCATTERS
UME_FORCE_INLINE SCALAR_UINT_TYPE* scatter(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE* baseAddr, SCALAR_UINT_TYPE* indices) const {
for(unsigned int i = 0; i < VEC_LEN; i++)
{
if(mask.mMask[i]) baseAddr[indices[i]] = mVec[i];
}
return baseAddr;
}
// SCATTERV
UME_FORCE_INLINE SCALAR_UINT_TYPE* scatter(SCALAR_UINT_TYPE* baseAddr, SIMDVec_u const & indices) const {
for(unsigned int i = 0; i < VEC_LEN; i++)
{
baseAddr[indices.mVec[i]] = mVec[i];
}
return baseAddr;
}
// MSCATTERV
UME_FORCE_INLINE SCALAR_UINT_TYPE* scatter(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE* baseAddr, SIMDVec_u const & indices) const {
for(unsigned int i = 0; i < VEC_LEN; i++)
{
if(mask.mMask[i]) baseAddr[indices.mVec[i]] = mVec[i];
}
return baseAddr;
}
// LSHV
// LSHV: element-wise left shift, result[i] = this[i] << b[i].
// NOTE(review): shift counts are not range-checked; a count >= the bit width
// of SCALAR_UINT_TYPE is undefined behavior in C++. Presumably callers keep
// counts in range — confirm if this is fed external data.
UME_FORCE_INLINE SIMDVec_u lsh(SIMDVec_u const & b) const {
    SIMDVec_u retval;
    SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
    SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
    SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int i = 0; i < VEC_LEN; i++) {
        local_retval_ptr[i] = local_ptr[i] << local_b_ptr[i];
    }
    return retval;
}
// MLSHV
UME_FORCE_INLINE SIMDVec_u lsh(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] << local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// LSHS
// LSHS: element-wise left shift by a scalar count, result[i] = this[i] << b.
UME_FORCE_INLINE SIMDVec_u lsh(SCALAR_UINT_TYPE b) const {
    SIMDVec_u result;
    SCALAR_UINT_TYPE * out = &result.mVec[0];
    SCALAR_UINT_TYPE const * in = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for(unsigned int lane = 0; lane < VEC_LEN; lane++) {
        out[lane] = in[lane] << b;
    }
    return result;
}
// MLSHS
UME_FORCE_INLINE SIMDVec_u lsh(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] << b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// LSHVA
UME_FORCE_INLINE SIMDVec_u & lsha(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] <<= local_b_ptr[i];
}
return *this;
}
// MLSHVA
UME_FORCE_INLINE SIMDVec_u & lsha(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] <<= local_b_ptr[i];
}
return *this;
}
// LSHSA
UME_FORCE_INLINE SIMDVec_u & lsha(SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] <<= b;
}
return *this;
}
// MLSHSA
UME_FORCE_INLINE SIMDVec_u & lsha(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_ptr[i] <<= b;
}
return *this;
}
// RSHV
UME_FORCE_INLINE SIMDVec_u rsh(SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] >> local_b_ptr[i];
}
return retval;
}
// MRSHV
UME_FORCE_INLINE SIMDVec_u rsh(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] >> local_b_ptr[i];
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// RSHS
UME_FORCE_INLINE SIMDVec_u rsh(SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_retval_ptr[i] = local_ptr[i] >> b;
}
return retval;
}
// MRSHS
UME_FORCE_INLINE SIMDVec_u rsh(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) const {
SIMDVec_u retval;
SCALAR_UINT_TYPE * local_retval_ptr = &retval.mVec[0];
SCALAR_UINT_TYPE const * local_ptr = &mVec[0];
bool const * local_mask_ptr = &mask.mMask[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
if(local_mask_ptr[i] == true) local_retval_ptr[i] = local_ptr[i] >> b;
else local_retval_ptr[i] = local_ptr[i];
}
return retval;
}
// RSHVA
UME_FORCE_INLINE SIMDVec_u & rsha(SIMDVec_u const & b) {
SCALAR_UINT_TYPE * local_ptr = &mVec[0];
SCALAR_UINT_TYPE const * local_b_ptr = &b.mVec[0];
#pragma omp simd safelen(VEC_LEN)
for(unsigned int i = 0; i < VEC_LEN; i++) {
local_ptr[i] >>= local_b_ptr[i];
}
return *this;
}
// MRSHVA
// MRSHVA: masked in-place element-wise right shift by a vector of counts.
// Only lanes with a true mask are modified; returns *this for chaining.
UME_FORCE_INLINE SIMDVec_u & rsha(SIMDVecMask<VEC_LEN> const & mask, SIMDVec_u const & b) {
    SCALAR_UINT_TYPE       * dst   = &mVec[0];
    SCALAR_UINT_TYPE const * shift = &b.mVec[0];
    bool const             * m     = &mask.mMask[0];
    #pragma omp simd safelen(VEC_LEN)
    for (unsigned int lane = 0; lane < VEC_LEN; ++lane) {
        if (m[lane]) {
            dst[lane] >>= shift[lane];
        }
    }
    return *this;
}
// RSHSA
// RSHSA: in-place element-wise right shift of every lane by a scalar count.
// Returns *this to allow chaining.
UME_FORCE_INLINE SIMDVec_u & rsha(SCALAR_UINT_TYPE b) {
    SCALAR_UINT_TYPE * dst = &mVec[0];
    #pragma omp simd safelen(VEC_LEN)
    for (unsigned int lane = 0; lane < VEC_LEN; ++lane) {
        dst[lane] = dst[lane] >> b;
    }
    return *this;
}
// MRSHSA
// MRSHSA: masked in-place element-wise right shift by a scalar count.
// Only lanes with a true mask are modified; returns *this for chaining.
UME_FORCE_INLINE SIMDVec_u & rsha(SIMDVecMask<VEC_LEN> const & mask, SCALAR_UINT_TYPE b) {
    SCALAR_UINT_TYPE * dst = &mVec[0];
    bool const       * m   = &mask.mMask[0];
    #pragma omp simd safelen(VEC_LEN)
    for (unsigned int lane = 0; lane < VEC_LEN; ++lane) {
        if (m[lane]) {
            dst[lane] >>= b;
        }
    }
    return *this;
}
// ROLV
// MROLV
// ROLS
// MROLS
// ROLVA
// MROLVA
// ROLSA
// MROLSA
// RORV
// MRORV
// RORS
// MRORS
// RORVA
// MRORVA
// RORSA
// MRORSA
// DEGRADE
UME_FORCE_INLINE operator SIMDVec_u<SCALAR_UINT_LOWER_PRECISION, VEC_LEN>() const;
// PROMOTE
UME_FORCE_INLINE operator SIMDVec_u<SCALAR_UINT_HIGHER_PRECISION, VEC_LEN>() const;
// UTOI
UME_FORCE_INLINE operator SIMDVec_i<SCALAR_INT_TYPE, VEC_LEN>() const;
// UTOF
UME_FORCE_INLINE operator SIMDVec_f<SCALAR_FLOAT_TYPE, VEC_LEN>() const;
};
// SIMD NullTypes. These are used whenever a terminating
// scalar type is used as a creator function for SIMD type.
// These types cannot be instantiated, but are necessary for
// typeset to be consistent.
// Terminating specializations for NullType<1> at every supported vector length
// (1..128). Per the note above, these exist only to keep the recursive typeset
// well-formed; they carry no data and no operations.
template<>
class SIMDVec_u<NullType<1>, 1>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 2>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 4>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 8>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 16>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 32>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 64>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<1>, 128>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
// Terminating specializations for NullType<2> at every supported vector length
// (1..128); empty stubs that only keep the typeset consistent.
template<>
class SIMDVec_u<NullType<2>, 1>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 2>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 4>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 8>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 16>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 32>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 64>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<2>, 128>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
// Terminating specializations for NullType<3> at every supported vector length
// (1..128); empty stubs that only keep the typeset consistent.
template<>
class SIMDVec_u<NullType<3>, 1>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 2>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 4>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 8>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 16>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 32>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 64>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
template<>
class SIMDVec_u<NullType<3>, 128>
{
public: // private:
SIMDVec_u() {}
~SIMDVec_u() {}
};
}
}
#endif
|
shape.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* shape.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef SHAPE_H_
#define SHAPE_H_
#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
#define MAX_RANK 32
#define MAX_SHAPEINFOLENGTH 2*MAX_RANK+4
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif
#ifdef __CUDACC__
#define INLINEDEF inline
#else
#define INLINEDEF inline
#endif
#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>
typedef unsigned int uint;
namespace shape {
/**
* Shape information approximating
* the information on an ndarray
*/
struct ND4J_EXPORT ShapeInformation {
// Stores the given pointers as-is (no copy is made), so the caller retains
// ownership of the shape_/stride_ buffers for the lifetime of this struct.
_CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0)
: shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_)
{}
Nd4jLong *shape; // extent of each dimension (rank entries) -- not owned
Nd4jLong *stride; // per-dimension stride, in elements -- not owned
char order; // memory ordering, presumably 'c' or 'f' as elsewhere in this header; 0 = unset
int rank; // number of dimensions
int offset; // offset of the first element into the underlying buffer
int elementWiseStride; // uniform stride across all elements -- NOTE(review): sentinel for "none" not visible here, confirm (0 vs -1)
};
/**
* Indexing information
* for bounds checking
*/
struct ND4J_EXPORT CurrentIndexing {
int numElementsPerThread; // how many elements each thread is responsible for
int blockStartingIndex; // first linear index assigned to the current block
int startingThreadIndex; // first linear index assigned to the current thread
int endingThreadIndex; // last index for the current thread -- NOTE(review): inclusivity not shown here, confirm
};
ND4J_EXPORT _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3);
ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);
ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
// returns true if ranks, shapes and strides are the same
ND4J_EXPORT _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3);
ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim);
template <typename T>
ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);
ND4J_EXPORT _CUDA_HD void traceNew(int id);
ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);
ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);
ND4J_EXPORT _CUDA_HD bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output);
#ifdef __CUDACC__
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);
ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);
ND4J_EXPORT _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order);
// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
template <typename T>
ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);
ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD bool isContiguous(const Nd4jLong* shapeInfo);
/**
* copy-past from java hasDefaultStridesForShape function
* check whether array is not permuted and has contiguous elements in memory
*/
ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);
/**
* In place permute swap
* @param length
* @param shape
* @param rearrange
*/
ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);
ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange, Nd4jLong len = -1);
/**
* Rearrange the permute indexes
* according to which dimensions are specified.
*
* For example, dimension is implicitly:
* 0,1,2
*
* If you want to do a reduce along dimensions 0 and 1,
* you need to permute the indexes to be:
* 2,0,1
*
* which will give us the ability to iterate along an element
* wise stride.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);
/**
* This method does inplace transpose of given shapeBuffer
*
* @param shapeBuffer
*/
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);
/**
* When 1 dimension is the whole length of the
* array
*/
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);
/**
* Returns the shape portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 4
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo);
/**
* Returns the rank portion of
* an information buffer
*/
ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int rank(const int *shapeInfo);
ND4J_EXPORT _CUDA_HD int rank(const unsigned int *shapeInfo);
// returns pointer on elementWiseStride
ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo);
/**
* returns pointer on elementWiseStride
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo);
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);
/**
* Returns the stride portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer);
/**
* Compute the length of the given shape
*/
ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);
/***
* Returns the offset portion of an information buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);
/**
* Returns the ordering
* for this shape information buffer
*/
ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer);
/**
* Returns the type
*/
ND4J_EXPORT _CUDA_HD Nd4jLong type(const Nd4jLong* shapeInfo);
/**
* Returns the element wise stride for this information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
* relative to a dimension and ordering for a reduction index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);
/**
* Iterate over a given set of indexes
* the begin and end indexes are 0 based.
* 1 padding is automatically assumed for the ending.
*
* For example if you want to iterate over 0 to 4
* it will go to 4 rather than 3.
*
* indexes should be the indexes to exclude
* indexes length should be the length of indexes
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
//#ifdef __CUDACC__
// __device__
//#endif
// ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);
/**
* Range between from and to with an
* increment of 1
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);
/**
* Keep the given indexes
* in the data
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);
/**
* Generate reverse copy of the data
* @param data
* @param length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank,
int index,
Nd4jLong *shape,
Nd4jLong *tensorShape,
int tensorShapeLength,
int *dimension,
int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
* Computes the tensor along dimension
* offset
* @param index the index to get the offset for the tad for
* @param rank the rank of the shapes and strides
* @param info the shape information to use for tad
* @param dimension the dimensions to use for computing the tensor along dimensions
*/
// ND4J_EXPORT _CUDA_HD int offset(int index,
// int rank,
// shape::ShapeInformation *info,
// Nd4jLong *dimension,
// int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank,
volatile int length,
volatile Nd4jLong *shape,
int *dimension,
int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);
/**
* Computes the number of tads per block
*
*/
ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);
// ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
// int dimensionLength);
/**
* Returns a shape buffer
* for the shape information metadata.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);
/**
* Returns the number of elements per thread
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int numElementsPerThread(int N);
/**
* Returns the block starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int blockStartingIndex(int N);
/**
* Returns the thread starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadStartingIndex(int N, int stride, int offset);
/**
* Returns the thread ending index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadEndingIndex(int N, int stride, int offset);
/**
* Returns indexing information
* for the current kernel invocation
*/
//#ifdef __CUDACC__
// __device__
//#endif
// CurrentIndexing *currentIndex(int N, int offset, int stride);
/** Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal);
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum);
/**
* Returns the prod of the data
* up to the given length
*/
ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);
ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length);
/**
* Returns the rear most left over item not present in
* the dimension array. This assumes that the dimension array is sorted.
*
* For example, given a dimension array of:
* 0,2
*
* and
*
* 12,4,2,1 in data
*
* You end up with 1 (data[3])
* since the first item won't match
* the last item of the dimension array
*/
// ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices,int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);
/**
* Convert a linear index to the corresponding coordinates
* for example if shape is {2, 4}, then index 5 corresponds to following coordinates
* -> [1, 1] in case of c order
* -> [1, 2] in case of f order
*/
ND4J_EXPORT _CUDA_HD void index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *coords, const char order = 'c');
ND4J_EXPORT _CUDA_HD void index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *coords, const char order = 'c');
/**
* Convert coordinates to the corresponding linear index (sequence number in other words)
* for example if shape is {2, 4}, then:
* in case of c order and coordinates [1, 1] index 5 is returned
* in case of f order and coordinates [1, 2] index 5 is returned
*/
ND4J_EXPORT _CUDA_HD Nd4jLong coords2index(const int rank, const Nd4jLong *shape, const Nd4jLong *coords, const char order = 'c');
/**
* increment n-dimensional array by one iteration by changing coord appropriately
* for example we have array with shape {2, 3}:
* - if input coord = {0,1}, then output coord = {0,2}
* - if input coord = {0,2}, then output coord = {1,0}
* so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2}
*/
/* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1}
* arrLen - array length
*/
ND4J_EXPORT _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order);
ND4J_EXPORT _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned);
/**
* Compute the real linear indices for the given shape and stride
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride);
/**
* Compute the real linear indices for the
* given shape buffer. Shape,stride and rank are derived
* from the buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides);
ND4J_EXPORT _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length);
ND4J_EXPORT _CUDA_HD void printIntArray(const int *arr, const int length);
ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);
template<typename T>
ND4J_EXPORT _CUDA_HD void printArray(T *arr,int length, const char *message);
ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);
// ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);
// this function checks the consistence of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// also sort input array of dimensions, this operation is also necessary for creating TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);
// function calculates linear index of array min, min is sub-array of max, index to be returned is min-array's index and corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// function calculates absolute offset of min array, min is sub-array of max, offset to be returned corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// dimsToExclude - should be sorted in increasing order
// dimsLen - length of dimsToExclude, if not set (= -1), then it is calculated as maxRank - minRank
ND4J_EXPORT _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr, const int dimsLen = -1);
// calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculate offsets of max-array, these output offsets correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculates offsets for entities (elements or sub-arrays), shape in context of sub-array means dimensions excluded from outer array
// rank is equal to size of shape
ND4J_EXPORT void calcOffsets(const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* offsets, const char order = 'c');
ND4J_EXPORT void calcOffsets(const Nd4jLong* shapeInfo, Nd4jLong* offsets, const char order = 'c');
ND4J_EXPORT void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const char order = 'c');
ND4J_EXPORT void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order = 'c');
ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order);
// deduce element-wise stride
// if array is scalar or unit length vector then ews = 1
// if array is common vector then ews = stride of non-unity dimension
// if strides are normal set ews = 1, otherwise ews = 0
ND4J_EXPORT _CUDA_HD void setEws(Nd4jLong* shapeInfo, Nd4jLong len);
// deduce order and element-wise stride
// if array is scalar or unit length vector then ews = 1 and order is preserved
// if array is common vector then ews = stride of non-unity dimension and order is preserved
// if strides are normal/contiguous then ews = 1 and corresponding order is set, otherwise ews = 0 and order is preserved
ND4J_EXPORT _CUDA_HD void setOrderAndEws(Nd4jLong* shapeInfo, Nd4jLong len = -1);
/**
* processes whole set of sub-arrays
* evaluates shapeInfo of sub-arrays (all sub-arrays have the same shapeInfo) and their buffer offsets (each sub-array has its own unique offset from original this-buffer)
* arguments:
* wholeShapeInfo - original shapeInfo of whole array
* numOfSubArrs - number of sub-arrays, size of subArrOffsets is equal to numOfSubArrs
* dimsSize - size of dimsToExclude, if dimsSize = array rank or dimsSize = 0 it means sub-array is whole array, copy of wholeShapeInfo and one zero offset will be returned
* dimsToExclude - MUST BE SORTED, dimensions to evaluate sub-array along, i.e. when shape is [2,3,4,5] and dimsToExclude={0,2}, then there will be 8 sub-arrays with shape [3,5]
* subArrShapeInfo - output argument, contains shapeInfo common for all sub-arrays
* subArrOffsets - output argument, contains successive sub-arrays offsets from original this-buffer
* keepUnitiesInShape - if false then eliminate unities from sub-array shapeInfo, for example {1,a,1,b} -> {a,b}
*/
ND4J_EXPORT _CUDA_HD void calcSubArrShapeAndOffsets(const Nd4jLong* wholeShapeInfo, const Nd4jLong numOfSubArrs, const int dimsSize, const int* dimsToExclude, Nd4jLong* subArrShapeInfo, Nd4jLong* subArrOffsets, bool keepUnitiesInShape = false);
//END HEADERS
//BEGIN IMPLEMENTATIONS
#ifdef __CUDACC__
/**
 * Returns this CUDA thread's slice of a pre-allocated device buffer:
 * buffer + threadIdx.x * size. No allocation actually happens here.
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
 */
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
Nd4jLong *ret = buffer;
ret += (threadIdx.x * size);
return ret;
}
#endif
/**
 * Length (number of elements) of a TAD: the product of the shape extents
 * along the given dimensions.
 *
 * @param shapeInfo        shape information buffer of the full array
 * @param dimension        dimension indexes the TAD spans
 * @param dimensionLength  number of entries in `dimension`
 * @return product of the extents at the requested dimensions
 */
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    if (dimensionLength == 1) {
        return shape::shapeOf(shapeInfo)[dimension[0]];
    }
    // Each valid dimension index contributes exactly once, so a single pass
    // over `dimension` suffices (the previous rank * dimensionLength double
    // loop computed the same product). Out-of-range indexes are skipped,
    // matching the old loop which never matched them against i in [0, rank).
    int ret = 1;
    for (int j = 0; j < dimensionLength; j++) {
        if (dimension[j] >= 0 && dimension[j] < shape::rank(shapeInfo))
            ret *= shape::shapeOf(shapeInfo)[dimension[j]];
    }
    return ret;
}
/**
 * Tad element wise stride:
 * given the inner most dimension (the sorted dimension of the last)
 * the element wise stride of the tad (disregarding order) is the
 * last dimension's stride.
 *
 * For a given singular dimension this will just be the only entry.
 * For example, given the following c order shape/stride:
 * 2,2,3,2
 * 12,6,2,1
 *
 * The tad element wise stride for 3 will be 1.
 * For zero it will be 12
 *
 * For 2,3 it's 1
 *
 * Note here that the multi dimensional 2,3 case
 * is equivalent to the singular 3 case.
 *
 *
 * Note that this is for the dimension that ultimately
 * ends up removed.
 *
 * Again: this may not preserve ordering of the tad
 * but may be used for reductions.
 */
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension,int dimensionLength) {
// thin wrapper: the TAD ews is exactly the reduction-index element-wise stride
return reductionIndexElementWiseStride(shapeInfo,dimension,dimensionLength);
}
// Two shapes are equal iff their ranks match and every extent agrees.
INLINEDEF _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2) {
    bool same = (shape1Rank == shape2Rank);
    for (int e = 0; same && e < shape1Rank; e++)
        same = (shape1[e] == shape2[e]);
    return same;
}
// shapeInfo overload: extracts rank and shape from each buffer and compares them
INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) {
return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo1)), shape::rank(shapeInfo2), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo2)));
}
// three-way overload: all three shapeInfo buffers must describe the same shape
INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3) {
return shape::shapeEquals(shapeInfo1, shapeInfo2) && shape::shapeEquals(shapeInfo1, shapeInfo3);
}
// Strides compare equal iff the ranks match and every entry agrees.
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
    bool same = (shape1Rank == shape2Rank);
    for (int e = 0; same && e < shape1Rank; e++)
        same = (shape1[e] == shape2[e]);
    return same;
}
// shapeInfo overload: extracts rank and stride arrays from each buffer and compares them
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
return shape::strideEquals(shape::rank(shapeInfo1),shape::stride(shapeInfo1),shape::rank(shapeInfo2),shape::stride(shapeInfo2));
}
// Raw-array overload: element-wise stride comparison guarded by a rank check.
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) {
    if (rank1 != rank2)
        return false;
    int e = 0;
    while (e < rank1 && stride1[e] == stride2[e])
        ++e;
    return e == rank1;
}
// Computes the shapeInfo buffer of a reduction result: the input shape with the
// reduced dimensions removed, padded back up to rank 2 so the result is at
// least a (row/column) vector. Caller owns the returned buffer (new[]).
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
Nd4jLong *retShape;
int retShapeLength;
// 2147483647 (INT_MAX) is the "reduce over all dimensions" sentinel
// (presumably MAX_DIMENSION - confirm against the library's constants);
// the result is then a {1,1} scalar shape
if(dimensionLength == 1 && dimension[0] == 2147483647) {
retShape = new Nd4jLong[2];
retShape[0] = 1;
retShape[1] = 1;
retShapeLength = 2;
}
else {
// NOTE(review): the third argument is shapeInfoLength(rank), not rank -
// verify this matches shape::removeIndex's expected "length of first array"
retShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
retShapeLength = shape::rank(originalShapeBuffer) - dimensionLength;
}
//ensure vector is proper shape: pad a rank-1 result to rank 2
if (retShapeLength == 1) {
if (dimension[0] == 0) {
// reduced along dim 0 -> row vector {1, n}
auto newRetShape = new Nd4jLong[2]{1, retShape[0]};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
else {
// otherwise -> column vector {n, 1}
auto newRetShape = new Nd4jLong[2]{retShape[0], 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
} else if (retShapeLength == 0) {
// everything reduced away -> scalar shaped {1, 1}
auto newRetShape = new Nd4jLong[2]{1, 1};
delete[] retShape;
retShape = newRetShape;
retShapeLength = 2;
}
// build the full shapeInfo (preserving the input's data type), then free the temp shape
auto ret = shape::shapeBuffer(retShapeLength, nd4j::ArrayOptions::dataType(originalShapeBuffer), retShape);
delete[] retShape;
return ret;
}
/**
 * Fills `buffer` with a shapeInfo whose shape/stride are taken from the given
 * dimensions of `shapeInfo`; order is copied from the source. Only the rank,
 * shape, stride and order fields are written.
 *
 * @param shapeInfo         source shape information
 * @param dimension         dimension indexes to keep
 * @param dimensionLength   number of entries in `dimension`
 * @param reverseCopyStride copy strides in reverse dimension order
 * @param buffer            output buffer, at least shapeInfoLength(rank) longs
 * @return buffer
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) {
    Nd4jLong *theShape = shape::shapeOf(shapeInfo);
    Nd4jLong *theStride = shape::stride(shapeInfo);
    // a single dimension still yields a rank-2 (vector-shaped) result
    int rank = dimensionLength == 1 ? 2 : dimensionLength;
    Nd4jLong *ret = buffer;
    //set the rank
    ret[0] = rank;
    Nd4jLong *retShape = shape::shapeOf(ret);
    Nd4jLong *retStride = shape::stride(ret);
    int len = rank;
    if (dimensionLength == 1) {
        if (shape::isMatrix(theShape, shape::rank(shapeInfo))) {
            // matrix input: result is a column vector {dim, 1}.
            // (The original had identical code in both branches of a
            // dimension[0] == 0 test; the duplicate branch is collapsed.)
            retShape[0] = theShape[dimension[0]];
            retShape[1] = 1;
            retStride[0] = theStride[dimension[0]];
            retStride[1] = 1;
        }
        else {
            // non-matrix input: result is a row vector {1, dim}
            retShape[0] = 1;
            retShape[1] = theShape[dimension[0]];
            retStride[0] = 1;
            retStride[1] = theStride[dimension[0]];
        }
    }
    else {
        // multi-dimensional case: gather shape/stride at the requested indexes
        Nd4jLong *newIndexes = dimension;
        if (reverseCopyStride)
            shape::reverseCopyTo(theStride, retStride, newIndexes, len);
        else
            shape::copyTo(len, theStride, retStride, newIndexes);
        shape::copyTo(len, theShape, retShape, newIndexes);
    }
    // order byte lives at the very end of the shapeInfo buffer
    ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
    return ret;
}
// Allocating overload: creates the output buffer (caller owns it) and delegates
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) {
int rank = dimensionLength == 1 ? 2 : dimensionLength;
traceNew(4);
Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, ret);
}
// Allocating overload: creates a shapeInfo buffer (caller owns it) and delegates
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {
traceNew(5);
Nd4jLong *ret = new Nd4jLong[shape::shapeInfoLength(rank)];
return createShapeInfo(shape, stride, rank, ret);
}
// Writes rank, shape and stride into `buffer` (other shapeInfo fields untouched).
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
    buffer[0] = rank;
    Nd4jLong *dstShape = shape::shapeOf(buffer);
    Nd4jLong *dstStride = shape::stride(buffer);
    for (int e = 0; e < rank; e++)
        dstShape[e] = shape[e];
    for (int e = 0; e < rank; e++)
        dstStride[e] = stride[e];
    return buffer;
}
/**
 * Computes the standard packed (column-major / Fortran) strides for a shape.
 * Vector shapes get unit strides. Caller owns the returned buffer (new[]).
 *
 * @param shape    the shape of the array
 * @param rank     number of dimensions
 * @param startNum the start number for the strides
 * @return the strides for an array of n dimensions
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
    if (isVector(shape, rank)) {
        traceNew(5);
        Nd4jLong *ret = new Nd4jLong[2];
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }
    int dimensions = rank;
    traceNew(6);
    Nd4jLong *stride = new Nd4jLong[dimensions];
    // accumulate in Nd4jLong: the running product of extents can overflow int
    Nd4jLong st = startNum;
    for (int j = 0; j < rank; j++) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * Fortran-order strides written into a caller-supplied buffer `ret`.
 * Vector shapes get unit strides.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
    if (isVector(shape, rank)) {
        for (int i = 0; i < rank; i++)
            ret[i] = 1;
        return ret;
    }
    // accumulate in Nd4jLong: the running product of extents can overflow int
    Nd4jLong st = startNum;
    for (int j = 0; j < rank; j++) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Computes the standard packed (row-major / C) strides for a shape.
 * Caller owns the returned buffer (new[]).
 *
 * @param shape    the shape of the array
 * @param rank     number of dimensions
 * @param startNum the start number for the strides
 * @return the strides for an array of n dimensions
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {
    traceNew(7);
    Nd4jLong *stride = new Nd4jLong[rank];
    if (rank == 1) {
        stride[0] = 1;
        return stride;
    }
    // accumulate in Nd4jLong: the running product of extents can overflow int
    Nd4jLong st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * C-order strides written into a caller-supplied buffer `ret`.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
    if (rank == 1) {
        ret[0] = 1;
        return ret;
    }
    // accumulate in Nd4jLong: the running product of extents can overflow int
    Nd4jLong st = startNum;
    for (int j = rank - 1; j >= 0; j--) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
// convenience overload: Fortran strides with the default start of 1
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
return calcStridesFortran(shape, rank, 1);
}
// convenience overload: Fortran strides (start 1) into a caller-supplied buffer
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
return calcStridesFortran(shape, rank, 1, ret);
}
/**
 * Computes the standard packed array strides for a given shape.
 *
 * @param shape the shape of a matrix:
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
// convenience overload: C strides with the default start of 1
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
return calcStrides(shape, rank, 1);
}
// convenience overload: C strides (start 1) into a caller-supplied buffer
INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
return calcStrides(shape, rank, 1, ret);
}
//////////////////////////////////////////////////////////////////////
// Recomputes packed strides in-place inside a full shapeInfo buffer for the
// given order, then (re)sets the trailing ews and order fields.
// Layout assumed: [rank, shape[1..rank], stride[rank+1..2*rank], ..., ews, order]
INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shapeInfo, const char order) {
int rank = shapeInfo[0];
int doubleRank = 2*rank;
if (rank > 0) {
if (order == 'c') {
shapeInfo[doubleRank] = 1; // set unity as last stride for c order
for (int j = 1; j < rank; ++j) {
// stride[i] = stride[i+1] * shape[i+1], walking backwards
shapeInfo[doubleRank - j] = shapeInfo[doubleRank - j + 1] * shapeInfo[rank + 1 - j];
}
} else {
shapeInfo[rank + 1] = 1; // set unity as first stride for f order
for (int j = rank + 1; j < doubleRank; ++j) {
// stride[i] = stride[i-1] * shape[i-1], walking forwards
shapeInfo[j + 1] = shapeInfo[j] * shapeInfo[j - rank];
}
}
}
// set last 2 elements in shapeInfo: ews = 1 and the order character code
// (written even for rank 0)
shapeInfo[doubleRank + 2] = 1;
shapeInfo[doubleRank + 3] = (int)order;
}
//////////////////////////////////////////////////////////////////////
// Recomputes packed strides in-place from a raw shape array for the given order.
INLINEDEF _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order) {
    if (rank <= 0)
        return;
    if (order == 'c') {
        // row-major: last stride is 1, walk backwards
        stridesOnly[rank - 1] = 1;
        for (int i = rank - 2; i >= 0; --i)
            stridesOnly[i] = stridesOnly[i + 1] * shapeOnly[i + 1];
    }
    else {
        // column-major: first stride is 1, walk forwards
        stridesOnly[0] = 1;
        for (int i = 1; i < rank; ++i)
            stridesOnly[i] = stridesOnly[i - 1] * shapeOnly[i - 1];
    }
}
// Returns true when the dimension indexes are permuted; a non-permuted
// dimensions array must be non-decreasing (0,...,rank-1 order).
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) {
    for (Nd4jLong i = 1; i < dimSize; ++i)
        if (dimensions[i - 1] > dimensions[i])
            return true;
    return false;
}
/**
 * Deep-copies a ShapeInformation struct: the shape and stride arrays are
 * freshly allocated and owned by the returned copy.
 *
 * @param toCopy the shape information to copy
 * @return a copy of the original struct (caller owns it and its arrays)
 */
INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
    auto copy = new ShapeInformation;
    traceNew(8);
    copy->shape = new Nd4jLong[toCopy->rank];
    memcpy(copy->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));
    traceNew(9);
    copy->stride = new Nd4jLong[toCopy->rank];
    memcpy(copy->stride, toCopy->stride, toCopy->rank * sizeof(Nd4jLong));
    copy->order = toCopy->order;
    copy->rank = toCopy->rank;
    copy->offset = toCopy->offset;
    copy->elementWiseStride = toCopy->elementWiseStride;
    return copy;
}
// Tries to collapse the (shape, stride) pair into a single element-wise stride:
// returns that stride when the buffer can be traversed linearly in the requested
// order, or 0 when it cannot. Internally this reshapes to {1, prod(shape)} using
// an axis-merging algorithm (the same idea NumPy uses for copy-free reshape).
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
if (rank == 0)
return 1;
// vectors are trivially linear with the stride of their last dimension
if(shape::isVector(shape,rank)) {
return stride[rank - 1];
}
else {
int oldnd;
Nd4jLong *oldDims = shape::copyOf(rank, shape);
Nd4jLong *oldStrides = shape::copyOf(rank, stride);
int np, op, last_stride;
int oldStart, oldStop, ok, newStart, newStop, nk;
traceNew(10);
auto newStrides = new Nd4jLong[rank];
oldnd = 0;
//set the shape to be 1 x length
int newShapeRank = 2;
auto newShape = new Nd4jLong[newShapeRank];
newShape[0] = 1;
newShape[1] = shape::prodLong(shape, rank);
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oldStart = 0; oldStart < rank; oldStart++) {
if (shape[oldStart] != 1) {
oldDims[oldnd] = shape[oldStart];
oldStrides[oldnd] = stride[oldStart];
oldnd++;
}
}
np = 1;
for (newStart = 0; newStart < newShapeRank; newStart++) {
np *= newShape[newStart];
}
op = 1;
for (oldStart = 0; oldStart < oldnd; oldStart++) {
op *= oldDims[oldStart];
}
if (np != op) {
/* different total sizes; no hope */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
/* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
oldStart = 0;
oldStop = 1;
newStart = 0;
newStop = 1;
// greedily match groups of old axes to groups of new axes with equal element counts
while (newStart < newShapeRank && oldStart < oldnd) {
np = newShape[newStart];
op = oldDims[oldStart];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShape[newStop++];
} else {
op *= oldDims[oldStop++];
}
}
/* Check whether the original axes can be combined */
for (ok = oldStart; ok < oldStop - 1; ok++) {
if (isFOrder) {
if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
/* not contiguous enough */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
} else {
/* C order */
if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
/* not contiguous enough */
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return 0;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[newStart] = oldStrides[oldStart];
for (nk = newStart + 1; nk < newStop; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
}
} else {
/* C order */
newStrides[newStop - 1] = oldStrides[oldStop - 1];
for (nk = newStop - 1; nk > newStart; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShape[nk];
}
}
newStart = newStop++;
oldStart = oldStop++;
}
/*
* Set strides corresponding to trailing 1s of the new shape.
*/
if (newStart >= 1) {
last_stride = newStrides[newStart - 1];
} else {
last_stride = stride[rank - 1];
}
if (isFOrder) {
if (newStart >= 1)
last_stride *= newShape[newStart - 1];
}
for (nk = newStart; nk < newShapeRank; nk++) {
newStrides[nk] = last_stride;
}
//returns the last element of the new stride array
int ret = last_stride;
delete[] newStrides;
delete[] newShape;
delete[] oldStrides;
delete[] oldDims;
return ret;
}
}
// Dimension-restricted overload: for a single dimension the ews is simply that
// dimension's stride; for anything else this conservatively reports 0 ("no ews").
// NOTE(review): rank/shape/isFOrder are unused here - presumably intentional.
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder,
Nd4jLong *dimension, int dimensionLength) {
if(dimensionLength == 1) {
return stride[dimension[0]];
}
return 0;
}
/**
 * Builds a full c-order shapeInfo buffer for the given rank/shape and tags it
 * with the given data type. Caller owns the returned buffer (new[]).
 * Note: `shape` is referenced, not copied, while building - it is not freed here.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
Nd4jLong *stride = shape::calcStrides(shape, rank);
traceNew(11);
auto shapeInfo = new shape::ShapeInformation();
shapeInfo->shape = shape;
shapeInfo->stride = stride;
shapeInfo->offset = 0;
shapeInfo->rank = rank;
int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo->order = 'c';
shapeInfo->elementWiseStride = elementWiseStride;
// serialize the struct into a flat shapeInfo buffer, then free the temporaries
auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
delete[] stride;
delete shapeInfo;
nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype);
return shapeInfoBuffer;
}
/**
 * Non-allocating variant: fills the caller-supplied `buffer` with a c-order
 * shapeInfo for the given rank/shape and tags it with the data type.
 *
 * This is special method, it returns ONLY 2D shapebuffer.
 * This method is used only for SoftMax
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) {
// strides live on the stack; MAX_RANK bounds the rank
Nd4jLong stride[MAX_RANK];
shape::calcStrides(shape,rank, stride);
shape::ShapeInformation shapeInfo;
shapeInfo.shape = shape;
shapeInfo.stride = stride;
shapeInfo.offset = 0;
shapeInfo.rank = rank;
auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo.order = 'c';
shapeInfo.elementWiseStride = elementWiseStride;
shape::toShapeBuffer(&shapeInfo, buffer);
nd4j::ArrayOptions::setDataType(buffer, dtype);
return buffer;
}
/**
 * Builds a full f-order (Fortran/column-major) shapeInfo buffer for the given
 * rank/shape and tags it with the data type. Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
auto stride = shape::calcStridesFortran(shape,rank);
traceNew(12);
auto shapeInfo = new shape::ShapeInformation();
shapeInfo->shape = shape;
shapeInfo->stride = stride;
shapeInfo->offset = 0;
shapeInfo->rank = rank;
int elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo->order = 'f';
shapeInfo->elementWiseStride = elementWiseStride;
// serialize the struct into a flat shapeInfo buffer, then free the temporaries
auto shapeInfoBuffer = shape::toShapeBuffer(shapeInfo);
delete[] stride;
delete shapeInfo;
nd4j::ArrayOptions::setDataType(shapeInfoBuffer, dtype);
return shapeInfoBuffer;
}
// Non-allocating f-order variant: fills the caller-supplied `output` buffer
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) {
Nd4jLong stride[MAX_RANK];
shape::calcStridesFortran(shape,rank, stride);
shape::ShapeInformation shapeInfo;
shapeInfo.shape = shape;
shapeInfo.stride = stride;
shapeInfo.offset = 0;
shapeInfo.rank = rank;
auto elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
shapeInfo.order = 'f';
shapeInfo.elementWiseStride = elementWiseStride;
shape::toShapeBuffer(&shapeInfo, output);
nd4j::ArrayOptions::setDataType(output, dtype);
return output;
}
/**
 * Compute the real linear (buffer) indices for the given shape and stride:
 * for each f-order position i, the returned array holds the corresponding
 * buffer offset. Caller owns the returned buffer (new[]).
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
    Nd4jLong length = shape::prodLong(shape, rank);
    traceNew(13);
    Nd4jLong *ret = new Nd4jLong[length];
    // reuse a single coordinate buffer instead of allocating/freeing per element;
    // Nd4jLong loop counter since length can exceed INT_MAX
    Nd4jLong *idx = new Nd4jLong[rank];
    for (Nd4jLong i = 0; i < length; i++) {
        shape::index2coords(rank, shape, i, idx, 'f');
        ret[i] = shape::getOffset(0, shape, stride, idx, rank);
    }
    delete[] idx;
    return ret;
}
/**
 * Compute the real linear indices for the given shape and stride,
 * with rank/shape/stride derived from a full shapeInfo buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
return computeIndices(shape::rank(shapeBuffer),shape::shapeOf(shapeBuffer),shape::stride(shapeBuffer));
}
//////////////////////////////////////////////////////////////////////
// Converts an n-dimensional coordinate into a linear index for the given
// shape and ordering ('c' = row-major, anything else = column-major).
INLINEDEF _CUDA_HD Nd4jLong coords2index(const int rank, const Nd4jLong *shape, const Nd4jLong *indices, const char order) {
    Nd4jLong idx = 0;
    Nd4jLong runningStride = 1;
    if (order == 'c') {
        // last coordinate varies fastest
        for (int i = rank - 1; i >= 0; --i) {
            idx += runningStride * indices[i];
            runningStride *= shape[i];
        }
    }
    else {
        // first coordinate varies fastest
        for (int i = 0; i < rank; ++i) {
            idx += runningStride * indices[i];
            runningStride *= shape[i];
        }
    }
    return idx;
}
// Fills `length` elements of `buffer` with `value`.
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
    PRAGMA_OMP_SIMD
    // Nd4jLong counter: lengths can exceed INT_MAX (original used int)
    for (Nd4jLong e = 0; e < length; e++)
        buffer[e] = value;
}
//////////////////////////////////////////////////////////////////////
// Converts a linear index into a buffer offset using the shapeInfo buffer.
INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) {
// ews is stored at shapeInfo[2*rank + 2]
const Nd4jLong ews = shapeInfo[shapeInfo[0] + shapeInfo[0] + 2];
// fast path: positive ews with c order means the buffer is linear;
// the `else` below binds to the inner `if` (intentional dangling-else)
if(ews > 0 && order(shapeInfo) == 'c')
if (ews == 1)
return index;
else
return ews * index;
// general path: peel one coordinate per dimension and multiply by its stride
Nd4jLong offset = 0;
Nd4jLong rank = shapeInfo[0];
for(int i = 1; i <= shapeInfo[0]; ++i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + rank];
index %= arrLen;
}
}
return offset;
}
// 32-bit variant of getIndexOffset for shapeInfo buffers stored as uint.
INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) {
const uint rank = shapeInfo[0];
const uint ews = shapeInfo[rank + rank + 2];
// 99 is the character code of 'c'; the else binds to the inner if
if(ews > 0 && shapeInfo[rank + rank + 3] == 99)
if (ews == 1)
return index;
else
return ews * index;
// general path: peel one coordinate per dimension and multiply by its stride
uint offset = 0;
for(uint i = 1; i <= rank; ++i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + rank];
index %= arrLen;
}
}
return offset;
}
// Dispatches to the 32-bit or 64-bit getIndexOffset depending on the caller's
// guarantee that all values fit in unsigned 32 bits.
INLINEDEF _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned) {
    return useUnsigned
           ? static_cast<Nd4jLong>(getIndexOffset(static_cast<uint>(index), uShapeInfo, static_cast<uint>(arrLen)))
           : getIndexOffset(index, lShapeInfo, arrLen);
}
//////////////////////////////////////////////////////////////////////
// Converts a linear index in the requested order ('c' or 'f') into a buffer
// offset; no ews fast path - always walks the dimensions.
INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) {
Nd4jLong offset = 0;
if(order == 'c') {
// row-major: peel dimensions first-to-last
for(int i = 1; i <= *shapeInfo; ++i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + *shapeInfo];
index %= arrLen;
}
}
}
else {
// column-major: peel dimensions last-to-first
for(int i = *shapeInfo; i >= 1 ; --i) {
arrLen /= shapeInfo[i];
if(arrLen > 0 && shapeInfo[i] > 1) {
offset += (index / arrLen) * shapeInfo[i + *shapeInfo];
index %= arrLen;
}
}
}
return offset;
}
/**
 * Returns a freshly allocated copy of `shape` re-ordered by `rearrange`:
 * result[i] = shape[rearrange[i]]. Caller owns the returned buffer.
 *
 * @param length    number of entries
 * @param shape     source array
 * @param rearrange permutation indexes
 * @return the permuted copy
 */
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {
    traceNew(16);
    auto permuted = new Nd4jLong[length];
    for (int e = 0; e < length; e++)
        permuted[e] = shape[rearrange[e]];
    return permuted;
}
/**
 * In-place variant: re-orders *shape by `rearrange` (result[i] = old[rearrange[i]]).
 * No-ops for trivial inputs and identity permutations.
 *
 * @param length    number of entries
 * @param shape     pointer to the array to permute in place
 * @param rearrange permutation indexes
 */
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
    // single entry: nothing to permute
    if (length == 1)
        return;
    Nd4jLong *shapeDeref = *shape;
    // arrays with fewer than 2 elements cannot change under permutation
    if (shape::prodLong(shapeDeref, length) < 2)
        return;
    // identity permutation (consecutive indexes): nothing to do
    bool inOrder = true;
    for (int i = 0; i < length - 1; i++)
        inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
    if (inOrder)
        return;
    // length 2 and not in order: the entries are simply swapped
    if (length == 2) {
        auto tmp = shapeDeref[0];
        shapeDeref[0] = shapeDeref[1];
        shapeDeref[1] = tmp;
        return;
    }
    // general case: permute through a temporary copy
    // (the original also had an unreachable `length == 1` branch here - removed,
    // since length == 1 already returned at the top)
    auto temp = new Nd4jLong[length];
    memcpy(temp, shapeDeref, sizeof(Nd4jLong) * length);
    for (int i = 0; i < length; i++)
        shapeDeref[i] = temp[rearrange[i]];
    delete[] temp;
}
// Copies the source shapeInfo into `out` (unless they alias) and permutes `out`
// in place with the given rearrange indexes.
INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
if(shapeBuffer != out)
memcpy(out,shapeBuffer,sizeof(Nd4jLong) * shape::shapeInfoLength(shapeBuffer));
shape::doPermuteShapeInfo(out, rearrange);
}
// Returns a freshly allocated, permuted copy of the shapeInfo buffer.
// Caller owns the returned buffer (new[]).
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
auto len = shape::shapeInfoLength(shape::rank(shapeBuffer));
Nd4jLong *copy = shape::copyOf(len, shapeBuffer);
shape::doPermuteShapeInfo(copy,rearrange);
return copy;
}
// Permutes the shape and stride fields of a shapeInfo buffer in place and then
// re-derives its order/ews fields. `len` is the array length; -1 (presumably the
// declared default - confirm against the header) means "compute it here".
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int *rearrange, Nd4jLong len) {
if(len == -1) // calculate array length if it is not given
len = shape::length(shapeInfo);
//check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
if(len == 1)
return;
const int rank = shape::rank(shapeInfo);
// check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
bool isPermutNecessary = false;
for(int i = 0; i < rank; ++i)
if(rearrange[i] != i) {
isPermutNecessary = true;
break;
}
if(!isPermutNecessary)
return;
// check whether rearrange contains correct indexes
for(int i = 0; i < rank; ++i)
if(rearrange[i] >= rank || rearrange[i] < 0) {
printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
return;
}
// if everything is ok then perform permute
// temp holds rank + shape + stride (shapeInfoLength minus the 3 trailing fields)
auto temp = new Nd4jLong[shape::shapeInfoLength(rank) - 3];
memcpy(temp, shapeInfo, sizeof(Nd4jLong) * (shape::shapeInfoLength(rank) - 3));
for (int i = 0; i < rank; ++i) {
shapeInfo[i + 1] = temp[rearrange[i] + 1];
shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
}
// order and ews are no longer valid after the permute - recompute them
shape::setOrderAndEws(shapeInfo, len);
delete[] temp;
}
// Builds the permutation array [dimensionLength, ..., originalRank-1,
// 0, ..., dimensionLength-1]. Caller owns the returned buffer (new[]).
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
    const int delta = originalRank - dimensionLength;
    traceNew(17);
    auto indexes = new Nd4jLong[originalRank];
    for (int e = 0; e < originalRank; e++)
        indexes[e] = (e < delta) ? e + dimensionLength : e - delta;
    return indexes;
}
/**
 * Determines the memory ordering implied by a shape/stride pair:
 * 'a' when the strides are contiguous in both C and Fortran senses,
 * 'f' when only Fortran-contiguous, 'c' otherwise.
 *
 * @param length        number of dimensions
 * @param shape         extents per dimension
 * @param stride        strides per dimension
 * @param elementStride seed stride for the Fortran check
 * @return 'a', 'f' or 'c'
 */
INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) {
    // C (row-major) contiguity: strides must equal the running product of
    // extents walked from the last dimension backwards
    int cContiguous = 1;
    int sd = 1;
    for (int i = length - 1; i >= 0; --i) {
        int dim = shape[i];
        if (stride[i] != sd) {
            cContiguous = 0;
            break;
        }
        /* contiguous, if it got this far */
        if (dim == 0)
            break;
        sd *= dim;
    }
    // Fortran (column-major) contiguity, seeded with elementStride;
    // note: no break on mismatch, mirroring the original scan exactly
    int isFortran = 1;
    sd = elementStride;
    for (int i = 0; i < length; ++i) {
        int dim = shape[i];
        if (stride[i] != sd)
            isFortran = 0;
        if (dim == 0)
            break;
        sd *= dim;
    }
    if (isFortran && cContiguous)
        return 'a';
    if (isFortran)
        return 'f';
    // the original had two separate branches here, both returning 'c'
    return 'c';
}
/**
 * Validates a rearrange (permutation) array: it must have the same length as
 * the shape, contain only indexes in [0, arrLength), and have no duplicates.
 *
 * @param arr         the rearrange array
 * @param arrLength   its length
 * @param shapeLength expected length (the shape's length)
 * @return 1 when valid, -1 otherwise
 */
template <typename T>
INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) {
    if (arrLength != shapeLength)
        return -1;
    for (int i = 0; i < arrLength; i++)
        if (arr[i] < 0 || arr[i] >= arrLength)
            return -1;
    // pairwise duplicate scan (upper triangle is sufficient)
    for (int i = 0; i < arrLength; i++)
        for (int j = i + 1; j < arrLength; j++)
            if (arr[i] == arr[j])
                return -1;
    return 1;
}
// Debug hook for tracking heap allocations by call-site id.
// The body is currently compiled out (printf/fflush commented away).
INLINEDEF _CUDA_HD void traceNew(int id) {
//printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
//fflush(stdout);
#endif
}
/**
 * Permute the shape information in place.
 * Shape and stride are both re-ordered by `rearrange`, and the order
 * character is re-derived from the permuted strides.
 *
 * @param info the shape information to permute
 * @param rearrange the order to re arrange
 * @param rank the rank of the rearrange array
 */
INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) {
ShapeInformation *infoDeref = *info;
// NOTE(review): the validation result is ignored here - an invalid rearrange
// array still gets applied; confirm whether that is intentional
checkArrangeArray(rearrange, rank, rank);
shape::doPermuteSwap(rank, &infoDeref->shape, rearrange);
shape::doPermuteSwap(rank, &infoDeref->stride, rearrange);
char order = getOrder(rank,
infoDeref->shape,
infoDeref->stride,
infoDeref->elementWiseStride);
infoDeref->order = order;
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
// A shape is a vector when it is rank 1, or rank 2 with at least one unit dimension.
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) {
    if (rank == 0)
        return 0;
    if (rank == 1)
        return 1;
    if (rank > 2)
        return 0;
    // rank <= 2 here: vector iff one of the two dims is 1
    if (shape[0] == 1 || shape[1] == 1)
        return 1;
    return 0;
}
// True when the shape has rank > 2 but exactly one non-unit dimension
// (i.e. it behaves like a vector despite its rank). posOfNonUnityDim receives
// the 0-based axis of the last non-unit dimension seen.
INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
int numOfNonUnity = 0;
// shapeInfo[0] is the rank; dims occupy slots 1..rank
for(int i = 1; i <= shapeInfo[0]; ++i) {
if(shapeInfo[i] != 1) {
++numOfNonUnity;
posOfNonUnityDim = i-1;
}
}
return numOfNonUnity == 1 && shapeInfo[0] > 2;
}
// True when the shape has exactly one non-unit dimension (any rank), or is a
// length-1 array. posOfNonUnityDim receives the axis of that dimension (0 for
// the length-1 case).
INLINEDEF _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
// single-element arrays count as vectors along axis 0
if(rank(shapeInfo) > 0 && length(shapeInfo) == 1) {
posOfNonUnityDim = 0;
return true;
}
int numOfNonUnity = 0;
for(int i = 1; i <= shapeInfo[0]; ++i) {
if(shapeInfo[i] != 1) {
++numOfNonUnity;
posOfNonUnityDim = i-1;
}
}
return numOfNonUnity == 1;
}
// Heap-copies a full shape-info buffer; caller owns (delete[]) the result.
INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) {
Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)];
memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape));
return newShape;
}
// Identical to detachShape: heap-copies a shape-info buffer; caller must delete[].
INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) {
Nd4jLong *newShape = new Nd4jLong[shape::shapeInfoLength(originalShape)];
memcpy(newShape, originalShape, shape::shapeInfoByteLength(originalShape));
return newShape;
}
// shape-info overload: delegates to isVector(shape, rank).
INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) {
return isVector(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::rank(shapeInfo));
}
// A row vector is a vector whose first dimension is 1 (shape [1, n]).
INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) {
bool isVector = shape::isVector(shapeInfo) == 1;
bool shapeFirstOne = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1;
return isVector && shapeFirstOne;
}
// A column vector is any vector whose first dimension is not 1
// (this includes plain rank-1 shapes, not only [n, 1]).
INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) {
bool isVector = shape::isVector(shapeInfo) == 1;
bool shapeFirstOne = shape::shapeOf(shapeInfo)[0] == 1;
return isVector && !shapeFirstOne;
}
// Returns 1 if any single dimension equals the total element count of the
// shape (i.e. all other dimensions are 1), otherwise 0.
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) {
    // FIX: shape::prod was recomputed inside the loop on every iteration;
    // it is loop-invariant (pure product of the same array), so hoist it.
    const int total = shape::prod(shape, rank);
    for (int i = 0; i < rank; i++) {
        if (shape[i] == total)
            return 1;
    }
    return 0;
}
// shape-info overload: delegates to oneDimEqualToLength(shape, rank).
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) {
return oneDimEqualToLength(shape::shapeOf(shapeInfo),shape::rank(shapeInfo));
}
/**
* Returns whether the
* given shape is a matrix or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
// Returns 1 for a true matrix: rank <= 2 with neither of the first two
// dimensions equal to 1 (vectors and higher-rank shapes return 0).
// NOTE(review): for rank < 2 this reads shape[1] — assumes the buffer has at
// least two slots (true for shape-info buffers); confirm for raw shape arrays.
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
if (rank > 2)
return 0;
else if (rank <= 2) {
if (shape[0] == 1 || shape[1] == 1)
return 0;
}
return 1;
}
// shape-info overload: delegates to isMatrix(shape, rank).
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
return isMatrix(shape::shapeOf(shapeInfo),shape::rank(shapeInfo));
}
/**
* Returns the shape portion of an information
* buffer
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
// dimensions start right after the rank entry at buffer[0]
return buffer + 1;
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
traceNew(18);
// allocates; caller owns (delete[]) the returned buffer
T *ret = new T[length];
return copyOf(length, toCopy, ret);
}
// Copies `length` elements into the caller-supplied buffer `ret` and returns it.
template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
memcpy(ret, toCopy, sizeof(T)*length);
return ret;
}
/**
* Copy `length` elements from one buffer
* to another. No allocation is performed;
* the destination must already be sized.
*/
// Straight element copy from `from` into the pre-allocated `to`.
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
memcpy(to, from, sizeof(T)*length);
}
/**
* Gather copy: to[i] = from[indexes[i]].
* No allocation is performed; the destination
* must already be sized to `length`.
*/
// Gather copy: to[i] = from[indexes[i]] for i in [0, length).
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
    for (int pos = 0; pos < length; pos++)
        to[pos] = from[indexes[pos]];
}
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
checkArrangeArray(rearrange, shapeRank, shapeRank);
Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
delete[] strideCopy;
return newStride;
}
*/
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
// drop the leading dimension by pointer arithmetic
return shape + 1;
}
// Number of slices = size of the first dimension.
INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
return static_cast<int>(shape::shapeOf(shapeBuffer)[0]);
}
// Builds a freshly allocated shape-info buffer describing slice `sliceIdx` of
// the array described by `shapeBuffer` (the leading dimension is dropped;
// the result's rank is clamped to a minimum of 2). Caller owns the result.
INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
int newRank = rank - 1;
if(newRank < 2)
newRank = 2;
Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
newShapeBuffer[0] = newRank;
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
Nd4jLong *currStride = shape::stride(shapeBuffer);
//initialize new shape and stride by taking the shape and stride + 1
//and adding to the shape information
//a slice is always just taking the existing shape and cutting the first index off
//of the shape and stride
Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
Nd4jLong *newStride = shape::stride(newShapeBuffer);
if(shape::isVector(shapeBuffer)) {
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
//row vector: slice index 0 is a valid index, just copy the whole thing
if(currShape[0] == 1) {
if(sliceIdx == 0) {
memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
return newShapeBuffer;
}
// NOTE(review): a row vector with sliceIdx != 0 falls through to the
// generic path below — confirm that is the intended behavior.
}
//column vector: this will be a scalar
else {
delete[] newShapeBuffer;
Nd4jLong *scalar = shape::createScalarShapeInfo();
int offset = shape::offset(shapeBuffer);
// store source offset + slice index in the scalar's offset slot
scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
return scalar;
}
}
else if(shape::isMatrix(shapeBuffer)) {
// matrix slice: a single row of the second dimension
newShape[0] = 1;
newShape[1] = currShape[1];
newStride[0] = 1;
newStride[1] = currStride[1];
}
else {
// general case: drop the leading dim from both shape and stride
for(int i = 0; i < newRank; i++) {
newShape[i] = currShape[i + 1];
newStride[i] = currStride[i + 1];
}
}
auto indices = new Nd4jLong[rank];
memset((void *) indices,0,rank * sizeof(Nd4jLong));
indices[0] = sliceIdx;
// NOTE(review): getOffset iterates `rank` axes over newShape/newStride,
// which hold only newRank entries — verify the trailing read is benign.
Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
// set current order and ews
newShapeBuffer[2 * newRank + 2] = shape::elementWiseStride(shapeBuffer);
newShapeBuffer[2 * newRank + 3] = shape::order(shapeBuffer);
// correct order and ews if necessary
shape::setOrderAndEws(newShapeBuffer);
delete[] indices;
return newShapeBuffer;
}
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 4
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
// Slot count of a shape-info buffer: rank + shape(rank) + stride(rank) +
// type/offset + elementWiseStride + order = 2*rank + 4.
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
//FIXME magic numbers
return rank * 2 + 4;
}
// buffer overloads: rank is stored in slot 0
INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
return shapeInfoLength(static_cast<int>(shape[0]));
}
INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) {
return shapeInfoLength(static_cast<int>(shape[0]));
}
// Byte size of a shape-info buffer: (2*rank + 4) slots of Nd4jLong.
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
//FIXME magic numbers
return (rank * 2 + 4) * sizeof(Nd4jLong);
}
// buffer overload: rank is stored in slot 0
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) {
//FIXME magic numbers
return shapeInfoByteLength((int) shapeInfo[0]);
}
/**
* Returns the rank portion of
* an information buffer
*/
// Rank is always stored in slot 0 of a shape-info buffer, whatever its element type.
INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) {
return static_cast<int>(buffer[0]);
}
INLINEDEF _CUDA_HD int rank(const int *buffer) {
return buffer[0];
}
INLINEDEF _CUDA_HD int rank(const unsigned int *buffer) {
return static_cast<int>(buffer[0]);
}
// Pointer to the element-wise-stride slot (index 2*rank + 2, second-to-last).
INLINEDEF _CUDA_HD Nd4jLong* ews(Nd4jLong* shapeInfo) {
return shapeInfo + 2 * shapeInfo[0] + 2;
}
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
// Unpacks a raw shape-info buffer into a heap-allocated ShapeInformation whose
// shape/stride pointers alias the ORIGINAL buffer (no copies). Caller owns the
// struct; the buffer must outlive it.
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
    traceNew(19);
    auto info = new ShapeInformation;
    auto length = shapeInfoLength(rank(buffer));
    auto rank = buffer[0];

    // shape occupies slots [1, rank]; strides follow at [rank+1, 2*rank]
    info->shape = buffer + 1;
    // FIX: the stride pointer was computed and assigned twice in the original;
    // the redundant second assignment has been removed.
    info->stride = buffer + (1 + rank);
    info->rank = rank;

    // trailing metadata slots: offset, elementWiseStride, order
    info->offset = buffer[length - 3];
    info->elementWiseStride = buffer[length - 2];
    info->order = (char) buffer[length - 1];
    return info;
}
/**
* Returns the stride portion of an information
* buffer
*/
// Strides start right after the rank entry and the rank dimension entries.
INLINEDEF _CUDA_HD Nd4jLong *stride(Nd4jLong *buffer) {
return buffer + (1 + rank(buffer));
}
INLINEDEF _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer) {
return stride(const_cast<Nd4jLong *>(buffer));
}
// Checks the ARRAY_EMPTY bit in the extra/flags slot of the shape-info buffer.
INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) {
return ((shape::extra(const_cast<Nd4jLong*>(shapeInfo)) & ARRAY_EMPTY) == ARRAY_EMPTY);
}
/**
* Compute the length of the given shape
*/
// Total element count of the array described by shapeInfo.
// Rank 0 is a scalar (1 element) unless flagged empty (0 elements).
INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) {
const int rank = shape::rank(shapeInfo);
if (rank == 0) {
if (isEmpty(shapeInfo))
return 0L;
return 1L;
}
// rank 1: the single dimension is the length
if (rank == 1)
return shapeInfo[1];
// if(shape::elementWiseStride(shapeInfo) == 1) { // contiguous
// if(shape::order(shapeInfo) == 'c')
// return shapeInfo[1] * shapeInfo[rank + 1]; // first dim * first stride
// return shapeInfo[rank] * shapeInfo[2 * rank]; // last dim * last stride
// }
// general case: product of all dimensions
return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), rank);
}
// Element count of a shape supplied as an initializer list (product of dims).
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
// 64-bit dimension variant of the above.
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
/***
* Returns the offset
* portion of an information buffer
*/
// Offset slot: index (2*rank + 4) - 3 = 2*rank + 1.
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
// NOTE(review): extra() returns a reference to the SAME slot as offset() —
// the slot is shared/repurposed for flags. Confirm this aliasing is intended.
INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) {
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
/**
* Returns the ordering
* for this shape information buffer
*/
// Order flag ('c' or 'f') lives in the final slot, index 2*rank + 3.
INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) {
//FIXME magic numbers
return static_cast<char>(buffer[buffer[0] * 2 + 3]);
}
/**
* Returns type
*/
// Data-type tag lives at index 2*rank + 1 (the offset/extra slot).
INLINEDEF _CUDA_HD Nd4jLong type(const Nd4jLong *shapeInfo) {
return shapeInfo[2 * shapeInfo[0] + 1];
}
/**
* Returns the element wise stride for this information
* buffer
*/
// Element-wise stride is the second-to-last slot, index 2*rank + 2.
INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) {
return buffer[shapeInfoLength(static_cast<int>(buffer[0])) - 2];
}
/**
* Returns the element wise stride for this information
* buffer relative to a dimension and reduction index
*/
// Picks the stride to iterate with when reducing over `dimension`, branching on
// ordering ('f' vs 'c') and whether one or several dimensions are reduced.
// For multi-dim reductions the stride of the first ('f') or last ('c') listed
// dimension is used, unless the last listed dimension has size 1 (then 1).
INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) {
if(dimensionLength > 1) {
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
//int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
//return tadElementWiseStride;
// f-order: use the stride of the FIRST listed dimension
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
// degenerate (size-1) trailing dimension: unit stride
return 1;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
// c-order: use the stride of the LAST listed dimension
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
return 1;
}
}
else {
// single reduced dimension: no size-1 special case here
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
}
}
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
// A scalar is rank 0, or rank 1/2 with every dimension equal to 1.
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
    const int rank = shape::rank(info);
    switch (rank) {
        case 0:
            return 1;
        case 1:
            return shape::shapeOf(info)[0] == 1;
        case 2:
            return shape::shapeOf(info)[0] == 1 && shape::shapeOf(info)[1] == 1;
        default:
            return 0;
    }
}
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
// ShapeInformation variant of isScalar.
// NOTE(review): unlike the buffer overload above, rank == 0 falls through to
// return 0 here — confirm whether rank-0 structs should count as scalars.
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
const int rank = info->rank;
if(rank > 2)
return 0;
if(rank == 1)
return info->shape[0] == 1;
if(rank == 2)
return info->shape[0] == 1 && info->shape[1] == 1;
return 0;
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
// Copies into `ret` every element of `data` whose position is NOT listed in
// `indexes`. `ret` must hold at least dataLength - indexesLength elements.
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
    int written = 0;
    const int keepCount = dataLength - indexesLength;
    for (int pos = 0; pos < dataLength && written < keepCount; pos++) {
        // linear membership test against the removal list
        bool removed = false;
        for (int k = 0; k < indexesLength; k++) {
            if (indexes[k] == pos) {
                removed = true;
                break;
            }
        }
        if (!removed) {
            ret[written] = data[pos];
            written++;
        }
    }
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
// Allocating variant: returns a new zero-initialized buffer of size
// dataLength - indexesLength with the listed positions removed. Caller owns it.
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
    auto lengthOfArr = dataLength - indexesLength;
    if(lengthOfArr < 0) {
        printf("Remove index call created a <= 0 length array. This was likely not intended.");
        // FIX: clamp to zero — `new T1[negative]` is undefined behavior; a
        // zero-length buffer keeps the (already-broken) caller alive.
        lengthOfArr = 0;
    }
    auto ret = new T1[lengthOfArr];
    memset(ret,0,sizeof(T1) * lengthOfArr);
    removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
    return ret;
}
// Returns a new array of every index in [begin, end) that is NOT present in
// `indexes`. Caller owns (delete[]) the result.
// NOTE(review): the allocation size `end - indexesLength` is only correct when
// begin == 0 and all of `indexes` fall inside [begin, end) — confirm callers.
INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) {
int len = end - indexesLength;
traceNew(20);
auto ret = new Nd4jLong[len];
int retIdx = 0;
//not here that we do 0 based indexing for end - this assumes things like:
//0 to 4 are specified
for(int i = begin; i < end ; i++) {
bool found = false;
for(int j = 0; j < indexesLength; j++) {
if(indexes[j] == i) {
found = true;
break;
}
}
if(!found) {
ret[retIdx++] = i;
}
}
return ret;
}
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
#ifdef __CUDACC__
// Per-thread TAD offset: base offset plus this thread's element-wise step.
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
// Expands a 1-d shape to an explicit rank-2 vector: dimension 0 yields a row
// vector [1, n], any other dimension a column vector [n, 1]. Caller owns result.
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) {
    traceNew(21);
    Nd4jLong *out = new Nd4jLong[2];
    const Nd4jLong len = shape[0];
    out[0] = (dimension == 0) ? 1 : len;
    out[1] = (dimension == 0) ? len : 1;
    return out;
}
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
// Convenience overload: defaults to a row vector (dimension 0).
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) {
return ensureVectorShape(shape, 0);
}
/**
* This method does STRICT comparison for two shape buffers
*
* @param shape
* @return
*/
// Strict shape-info equality: identical rank and every remaining slot
// (shape, strides, type/offset, ews, order) must match.
INLINEDEF _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    // two rank-0 buffers are trivially equal
    if (shapeA[0] == 0)
        return true;
    const int total = shape::shapeInfoLength(shapeA[0]);
    int slot = 1;
    while (slot < total && shapeA[slot] == shapeB[slot])
        ++slot;
    return slot == total;
}
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// Equal rank plus identical shape and stride arrays (the 2*rank slots after
// the rank entry); trailing metadata (offset/ews/order) is ignored.
INLINEDEF _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) {
    if (shapeInfo1[0] != shapeInfo2[0])
        return false;
    if (shapeInfo1[0] == 0)
        return true;
    const int lastSlot = 2 * shapeInfo1[0];
    for (int slot = 1; slot <= lastSlot; ++slot) {
        if (shapeInfo1[slot] != shapeInfo2[slot])
            return false;
    }
    return true;
}
//////////////////////////////////////////////////////////////////////
INLINEDEF _CUDA_HD bool haveSameShapeAndStrides(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2, const Nd4jLong *shapeInfo3) {
return shape::haveSameShapeAndStrides(shapeInfo1, shapeInfo2) && shape::haveSameShapeAndStrides(shapeInfo1, shapeInfo3);
}
// Size of dimension `dim`; negative dims count back from the end (Python
// style). Rank-0 shapes report 1 for any dim.
INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) {
    const int r = rank(shape);
    if (r == 0)
        return 1;
    const int axis = (dim >= 0) ? dim : r + dim;
    return shape[1 + axis];
}
/**
* This method does SOFT comparison for two shape buffers, we compare only rank & shapes
*
* @param shape
* @return
*/
// Soft equality: compares rank and dimension sizes only; strides, ews and
// order are deliberately ignored.
INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;
    const auto dims = shapeA[0];
    for (int d = 1; d <= dims; d++) {
        if (shapeA[d] != shapeB[d])
            return false;
    }
    return true;
}
// Soft shape equality plus matching type slot (index shapeInfoLength - 3).
INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
return equalsSoft(shapeA, shapeB) && shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3];
}
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
// Builds a new buffer holding values stepping by `increment` from `from`
// toward `to` (exclusive). Caller owns (delete[]) the result.
// NOTE(review): when |from - to| < increment the returned 1-element buffer is
// left uninitialized, and the decreasing branch starts at from - 1 while the
// increasing branch starts at from — confirm both quirks are relied upon.
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to, int increment) {
int diff = nd4j::math::nd4j_abs<int>(from - to);
int retLength = diff / increment;
T *ret;
traceNew(22);
if(diff / increment < 1)
ret = new T[1];
else
ret = new T[diff / increment];
if (from < to) {
int count = 0;
for (int i = from; i < to; i += increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
} else if (from > to) {
int count = 0;
for (int i = from - 1; i >= to; i -= increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
}
return ret;
}
/**
* Generate a range
* beginning at from and ending at to
* incrementing by 1
* @param from the start
* @param to the end
* @return the int array starting at from and ending at to
*/
// Convenience overload: step of 1.
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to) {
return range<T>(from, to, 1);
}
/**
* Keep the given indexes in the data
* @param data
* @param index
* @param indexLength
* @param dataLength
* @return
*/
// Returns a new buffer holding data[i] for every position i listed in `index`,
// preserving data order. Caller owns (delete[]) the result.
INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) {
    traceNew(23);
    Nd4jLong *out = new Nd4jLong[indexLength];
    int written = 0;
    for (int pos = 0; pos < dataLength; pos++) {
        // linear membership test against the keep list
        bool wanted = false;
        for (int k = 0; k < indexLength; k++) {
            if (index[k] == pos) {
                wanted = true;
                break;
            }
        }
        if (wanted)
            out[written++] = data[pos];
    }
    return out;
}
/**
* Generate a reverse
* copy of the data
*/
// Returns a new reversed copy of `data` (nullptr for empty input). Caller owns it.
template <typename T>
INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) {
if (length < 1)
return nullptr;
traceNew(24);
T *copy = new T[length];
// <= length/2 so the middle element of odd-length inputs is written too;
// for even lengths the final iteration redundantly rewrites the same pair
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = data[i];
copy[i] = data[length - i - 1];
copy[length - i - 1] = temp;
}
return copy;
}
// Writes a reversed copy of `from` into the pre-allocated `to`.
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
if (length < 1)
return;
// <= length/2 covers the middle element of odd-length inputs
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = from[i];
to[i] = from[length - i - 1];
to[length - i - 1] = temp;
}
}
// Indexed variant: reverses the gathered sequence from[indexes[0..length-1]]
// into `to` (to[i] = from[indexes[length - i - 1]]).
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
if (length < 1)
return;
for (Nd4jLong i = 0; i <= length / 2; i++) {
T temp = from[indexes[i]];
to[i] = from[indexes[length - i - 1]];
to[length - i - 1] = temp;
}
}
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
// Concatenates two buffers into a newly allocated one; caller owns (delete[]).
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
traceNew(25);
T *ret = new T[arr1Length + arr2Length];
std::memcpy(ret, arr1, arr1Length * sizeof(T));
std::memcpy(ret + arr1Length, arr2, arr2Length * sizeof(T));
return ret;
}
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
// Flattens `numArrays` buffers (lengths[i] elements each) into one new buffer
// of numTotalElements entries; caller owns (delete[]) the result.
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
    T *out = new T[numTotalElements];
    T *cursor = out;
    for (Nd4jLong a = 0; a < numArrays; a++) {
        for (Nd4jLong e = 0; e < lengths[a]; e++) {
            *cursor = arr[a][e];
            ++cursor;
        }
    }
    return out;
}
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
if(shape::isVector(shape,rank)) {
//return total length for row vectors
if(dimensionLength == 1 && shape[0] == 1) {
return shape::prod(shape,rank);
}
// NOTE(review): a vector that fails the row-vector test falls through to
// the general computation below — confirm this is intended.
}
else if(rank == dimensionLength)
return shape::prod(shape,rank);
// general case: product of the dimensions NOT being reduced over
int absSelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);
traceNew(27);
auto ret2 = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
auto ret = prodLong(ret2, absSelta);
delete[] ret2;
return ret;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
// Offset of tensor `index`: index * tensorLength / lengthPerSlice,
// guarding against a non-positive slice length (returns 0).
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
auto tensorLength = prodLong(tensorShape, tensorShapeLength);
auto lengthPerSlice2 = lengthPerSlice(rank, shape, dimension, dimensionLength);
if (lengthPerSlice2 <= 0) {
return 0;
}
Nd4jLong offset = index * tensorLength / lengthPerSlice2;
return offset;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
// Precomputed variant of the overload above.
// NOTE(review): unlike that overload, lengthPerSlice2 == 0 is NOT guarded here.
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) {
Nd4jLong offset = index * tensorLength / lengthPerSlice2;
return offset;
}
#ifdef __CUDACC__
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
// shape-info-buffer variant of the device tadOffset above
INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) {
return offset + threadIdx.x * elementWiseStride(xInfo);
}
#endif
/**
* Computes the number
* of tensors along
* a given dimension
*/
// Number of TADs = total length / product of the kept (dimension) sizes.
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length,
volatile Nd4jLong *shape, int *dimension, int dimensionLength) {
Nd4jLong *tensorShape = shape::keep(shape, dimension, dimensionLength, rank);
Nd4jLong ret = length / shape::prodLong(tensorShape, dimensionLength);
delete[] tensorShape;
return ret;
}
/**
* Computes the number
* of tensors along
* a given dimension
*/
// shape-info overload of tensorsAlongDimension.
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
Nd4jLong *keepShape = shape::shapeOf(shapeInfo);
Nd4jLong *tensorShape = shape::keep(keepShape, dimension, dimensionLength, rank(shapeInfo));
Nd4jLong ret = shape::length(shapeInfo) / shape::prodLong(tensorShape, dimensionLength);
delete[] tensorShape;
return ret;
}
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
// Linear offset for a multi-index: baseOffset plus the dot product of indices
// and strides, skipping unit dimensions (whose index contributes nothing).
INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices, int rank) {
    Nd4jLong total = baseOffset;
    for (int axis = 0; axis < rank; ++axis) {
        if (shape[axis] != 1)
            total += indices[axis] * stride[axis];
    }
    return total;
}
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
// TAD number for local slot i within the given block (stride = blockSize).
INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) {
return blockIdx + i * blockSize;
}
/**
* Computes the number of tads per block
*
*/
// ceil(tads / blockSize) via double arithmetic.
INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) {
return nd4j::math::nd4j_ceil<double, int>(tads / (double) blockSize);
}
/**
* Returns a shape buffer
* for the shape information metadata.
*/
// Serializes a ShapeInformation struct into a newly allocated shape-info
// buffer [rank, shape..., stride..., offset, ews, order]. Caller owns it.
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) {
traceNew(29);
auto ret = new Nd4jLong[shapeInfoLength(info->rank)];
int count = 1;
int rank = info->rank;
ret[0] = info->rank;
for (int i = 0; i < rank; i++) {
ret[count++] = info->shape[i];
}
for (int i = 0; i < rank; i++) {
ret[count++] = info->stride[i];
}
ret[count++] = info->offset;
ret[count++] = info->elementWiseStride;
ret[count] = info->order;
return ret;
}
// Variant writing into a caller-supplied buffer. A rank-0 struct produces the
// fixed scalar encoding [0, 0, 1, 99] (empty shape, ews 1, 'c' order).
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) {
int count = 1;
int rank = info->rank;
ret[0] = info->rank;
if (ret[0] == 0) {
ret[1] = 0;
ret[2] = 1;
ret[3] = 99;
return ret;
}
for (int i = 0; i < rank; i++) {
ret[count++] = info->shape[i];
}
for (int i = 0; i < rank; i++) {
ret[count++] = info->stride[i];
}
ret[count++] = info->offset;
ret[count++] = info->elementWiseStride;
ret[count++] = info->order;
return ret;
}
// Debug helpers: print `length` entries space-separated, then a newline.
INLINEDEF _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length) {
for(int i = 0; i < length; i++) {
printf(" %lld ", (long long) arr[i]);
}
printf("\n");
}
INLINEDEF _CUDA_HD void printIntArray(const int *arr, const int length) {
for(int i = 0; i < length; i++) {
printf(" %i ", arr[i]);
}
printf("\n");
}
// Debug helper: prints rank, shape, stride and order of a shape-info buffer.
INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
Nd4jLong *shape = shape::shapeOf(shapeInfo);
printf("Rank %d\n",rank);
printf("Shape:\n");
for(int i = 0; i < rank; i++) {
printf(" %lld ",(long long) shape[i]);
}
printf("\n");
Nd4jLong *stride = shape::stride(shapeInfo);
printf("Stride:\n");
for(int i = 0; i < rank; i++) {
printf(" %lld ", (long long) stride[i]);
}
printf("\n");
printf("Order %c\n",shape::order(shapeInfo));
}
// Debug helper: prints the entire shape-info buffer as one bracketed list.
INLINEDEF _CUDA_HD void printShapeInfoLinear(const Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
int lim = shape::shapeInfoLength(rank);
printf("ShapeInfo: [");
for (int i = 0; i < lim; i++) {
printf("%lld", (long long) shapeInfo[i]);
if (i < lim - 1) {
printf(", ");
}
}
printf("]\n");
#ifndef __CUDA_ARCH__
fflush(stdout);
#endif
}
// Debug helper: prints shape then strides under a caller-supplied label.
// (Every shape entry gets a trailing comma, including the last before strides.)
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, const Nd4jLong *shape, const Nd4jLong *strides) {
printf("%s : [", msg);
for (int i = 0; i < rank; i++) {
printf("%lld, ", (long long) shape[i]);
}
for (int i = 0; i < rank; i++) {
printf("%lld", (long long) strides[i]);
if (i < rank - 1)
printf(", ");
}
printf("]\n");
#ifndef __CUDA_ARCH__
fflush(stdout);
#endif
}
// Debug helper: prints the entire shape-info buffer under a caller-supplied label.
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, const Nd4jLong *shapeInfo) {
int rank = shape::rank(shapeInfo);
int lim = shape::shapeInfoLength(rank);
printf("%s : [", msg);
for (int i = 0; i < lim; i++) {
printf("%lld", (long long) shapeInfo[i]);
if (i < lim - 1) {
printf(", ");
}
}
printf("]\n");
#ifndef __CUDACC__
fflush(stdout);
#endif
}
// Debug helper: prints a typed buffer (cast element-wise to float) with an
// optional label.
template <typename T>
INLINEDEF _CUDA_HD void printArray(void *varr,int length, const char * message) {
auto arr = reinterpret_cast<T*>(varr);
if (message != nullptr)
printf("%s: [", message);
else
printf("Array: [");
for (int i = 0; i < length; i ++) {
printf("%f", (float) arr[i]);
if (i + 1 < length) printf(", ");
}
printf("]\n");
#ifndef __CUDACC__
fflush(stdout);
#endif
}
// Debug helper: float specialization without a label (no flush here).
INLINEDEF _CUDA_HD void printArray(float *arr,int length) {
printf("Array: [");
for (int i = 0; i < length; i ++) {
printf("%f", arr[i]);
if (i + 1 < length) printf(", ");
}
printf("]\n");
}
/**
* Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
// Maps a linear index to its TAD number: each TAD spans
// numElementsPerTad * elementWiseStride linear positions.
INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) {
    const int span = elementWiseStride * numElementsPerTad;
    return i / span;
}
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
// Maps an original-problem TAD index onto its reduction index.
// NOTE(review): divides by tadsForOriginal / tadsForReduced — zero when
// tadsForReduced > tadsForOriginal; confirm callers guarantee the ordering.
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal) {
if (tadIndexForOriginal == 0)
return 0;
return tadIndexForOriginal / (tadsForOriginal / tadsForReduced);
}
// Transposes a shape-info buffer in place: reverses shape and strides and
// flips the stored order flag ('c' 99 <-> 'f' 102).
INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
    int rank = shape::rank(shapeBuffer);
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *strides = shape::stride(shapeBuffer);

    // swap shape
    // FIX: the swap temporaries were declared `int`, silently truncating
    // 64-bit Nd4jLong shape/stride values for large arrays; use Nd4jLong.
    for (int e = 0; e < rank / 2; e++) {
        int idx1 = rank - e - 1;
        int idx2 = e;
        Nd4jLong tmp = shape[idx2];
        shape[idx2] = shape[idx1];
        shape[idx1] = tmp;
    }

    // swap strides
    for (int e = 0; e < rank / 2; e++) {
        int idx1 = rank - e - 1;
        int idx2 = e;
        Nd4jLong tmp = strides[idx2];
        strides[idx2] = strides[idx1];
        strides[idx1] = tmp;
    }

    // toggle the order flag in the final slot
    if (shape::order(shapeBuffer) == 'c')
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
    else
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}
/**
* Tad index for linear
* @param linearIndex
* @param tadLength
* @return
*/
// Position of a linear index within its TAD (remainder modulo tadLength).
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
return linearIndex % tadLength;
}
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
// Original TADs per reduced TAD (integer division).
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
return tadsForOriginal / tadsForReduce;
}
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
// Composes tadIndex and reductionIndexForTad: linear index -> reduction index.
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum) {
int tad = tadIndex(i, elementWiseStride, numElementsPerTad);
return reductionIndexForTad(tad, tadNum, originalTadNum);
}
// Allocates a shape-info buffer describing a scalar, built via a temporary
// rank-1 ShapeInformation (shape [1], stride [1], ews 1, 'c' order).
// Caller owns (delete[]) the returned buffer.
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
traceNew(30);
auto shape = new Nd4jLong[1];
shape[0] = 1;
auto stride = new Nd4jLong[1];
stride[0] = 1;
auto shapeInformation2 = new ShapeInformation();
shapeInformation2->rank = 1;
shapeInformation2->offset = 0;
shapeInformation2->stride = stride;
shapeInformation2->shape = shape;
shapeInformation2->elementWiseStride = 1;
// 99 is the ASCII code for 'c' ordering
shapeInformation2->order = 99;
Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2);
// the buffer copies everything, so the temporaries can be released
delete shapeInformation2;
delete[] shape;
delete[] stride;
return ret;
}
// Fills a caller-provided 8-slot buffer with a rank-2 [1,1] scalar
// descriptor: rank, shape (1,1), strides (1,1), slot 5 zeroed, ews 1,
// order 'c' (99). Returns the same pointer for chaining.
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
    const Nd4jLong scalarDescriptor[8] = {2, 1, 1, 1, 1, 0, 1, 99};
    for (int i = 0; i < 8; i++)
        ret[i] = scalarDescriptor[i];
    return ret;
}
/**
 * Product of the first `length` entries of `data`,
 * accumulated in (and truncated to) int.
 */
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
    int result = 1;
    for (int i = 0; i < length; ++i)
        result *= data[i];
    return result;
}
/**
 * Product of the first `length` entries of `data`,
 * accumulated as Nd4jLong (64-bit safe variant of prod).
 */
INLINEDEF _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length) {
    Nd4jLong result = 1;
    for (int i = 0; i < length; ++i)
        result *= data[i];
    return result;
}
// Scans dimensions from the rear and returns the stride of the highest
// dimension that is NOT listed in `dimension` (the "left over" dimension,
// i.e. the last item of the difference between the full index list and the
// dimension indices). Falls back to stride[0] when all dimensions match.
//
// Fix: the original had byte-identical loops for 'f' and 'c' order, so the
// order check was redundant and has been removed; behavior is unchanged.
// Note the return type is int, so a 64-bit stride is truncated (pre-existing
// interface, kept as-is).
INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension, int dimensionLength) {
    Nd4jLong *stride = shape::stride(data);
    int rank = shape::rank(data);

    int dimIdx = dimensionLength - 1;
    for (int i = rank - 1; i >= 0; i--) {
        // first mismatch while walking backwards marks the highest
        // dimension missing from the dimension list
        if (dimension[dimIdx--] != i) {
            return (int) stride[i];
        }
    }

    // corner case: every dimension was covered, return the front stride
    return (int) stride[0];
}
#ifdef __CUDACC__
// Block-cooperative copy of a shapeInfo buffer into targetBuffer: each thread
// of the block copies a strided subset of the slots. The caller is
// responsible for any __syncthreads() needed before targetBuffer is consumed.
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
// we read first element, to find out length of our shapeInfoBuffer
int rank = shapeInfoBuffer[0];
int len = shape::shapeInfoLength(rank);
for (int i = threadIdx.x; i < len; i += blockDim.x)
targetBuffer[i] = shapeInfoBuffer[i];
}
#endif
// Convenience overload: derives rank, extents and order from a parsed
// npy array header and forwards to the pointer-based overload.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
    const int npyRank = (int) arr.shape.size();
    return shape::shapeBufferOfNpy(npyRank, (unsigned int *) arr.shape.data(), arr.fortranOrder);
}
// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
// unsigned Nd4jLong *shape;
// unsigned int ndims, wordSize;
// bool fortranOrder;
// cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
// Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
// delete[] shape;
// return ret;
// }
// Builds a FLOAT32 shape buffer from an npy header shape ('f' or 'c' order).
//
// Fix: the fortran-order path used to reinterpret the unsigned int array as
// Nd4jLong* via a cast ((Nd4jLong *) shape), which reads two 32-bit extents
// per 64-bit slot and produces garbage shapes on LP64 platforms. Both paths
// now widen the extents element-by-element, as the c-order path already did.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape, bool fortranOrder) {
    Nd4jLong *newShape = new Nd4jLong[rank];
    for (int i = 0; i < rank; i++) {
        newShape[i] = shape[i]; // widen 32-bit npy extent to Nd4jLong
    }

    Nd4jLong *shapeBufferRet = fortranOrder
            ? shape::shapeBufferFortran(rank, nd4j::FLOAT32, newShape)
            : shape::shapeBuffer(rank, nd4j::FLOAT32, newShape);

    delete[] newShape;
    return shapeBufferRet;
}
// Returns true when strides strictly descend for a 'c'-ordered array or
// strictly ascend for an 'f'-ordered one (layout consistent with declared
// order). Row vectors with unit strides are accepted unconditionally.
INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer));
char order = shape::order(shapeBuffer);
// NOTE(review): strides[1] is read here; for a rank-1 buffer that
// isRowVector() might accept this reads beyond the stride slots — confirm
// callers guarantee rank >= 2
if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
return true;
if (order == 'c') {
// 'c' layout: stride must strictly decrease towards the last axis
for (int i = 1; i < rank; i++)
if (strides[i-1] <= strides[i])
return false;
return true;
} else if (order == 'f') {
// 'f' layout: stride must strictly increase towards the last axis
for (int i = 1; i < rank; i++)
if (strides[i-1] >= strides[i])
return false;
return true;
} else {
printf("Unknown order for array!\n");
return false;
}
}
// An array counts as "contiguous" here iff it is c-ordered and its
// element-wise stride is positive.
INLINEDEF _CUDA_HD bool isContiguous(const Nd4jLong* shapeInfo) {
    const bool cOrder = order(shapeInfo) == 'c';
    const bool positiveEws = elementWiseStride(shapeInfo) > 0;
    return cOrder && positiveEws;
}
//////////////////////////////////////////////////////////////////////////
// copy-paste of the java hasDefaultStridesForShape function:
// returns true when the array's strides equal the default (dense) strides
// that updateStrides() would compute for this shape and order.
INLINEDEF _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo) {
const int rank = shape::rank(shapeInfo);
if(rank == 0)
return true;
// strides contradicting the declared order can never be the defaults
if(!strideDescendingCAscendingF(shapeInfo))
return false;
// rebuild default strides into a stack copy and compare the stride slots
// (buffer indices rank+1 .. 2*rank)
Nd4jLong defaultShapeInfo[MAX_SHAPEINFOLENGTH];
memcpy(defaultShapeInfo, shapeInfo, shape::shapeInfoByteLength(shapeInfo));
shape::updateStrides(defaultShapeInfo, shape::order(shapeInfo));
bool result = true;
for(int i = rank+1; i <= 2*rank; ++i)
if(defaultShapeInfo[i] != shapeInfo[i]) {
result = false;
break;
}
return result;
}
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
// int oldnd;
// Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
// Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
// int np, op, last_stride;
// int oi, oj, ok, ni, nj, nk;
// Nd4jLong* newStrides = new Nd4jLong[newRank];
// oldnd = 0;
// /*
// * Remove axes with dimension 1 from the old array. They have no effect
// * but would need special cases since their strides do not matter.
// */
// for (oi = 0; oi < oldRank; oi++) {
// if (shape::shapeOf(oldShape)[oi] != 1) {
// olddims[oldnd] = shape::shapeOf(oldShape)[oi];
// oldstrides[oldnd] = shape::stride(oldShape)[oi];
// oldnd++;
// }
// }
// np = 1;
// for (ni = 0; ni < newRank; ni++) {
// np *= newShapeOf[ni];
// }
// op = 1;
// for (oi = 0; oi < oldnd; oi++) {
// op *= olddims[oi];
// }
// if (np != op) {
// /* different total sizes; no hope */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// if (np == 0) {
// /* the current code does not handle 0-sized arrays, so give up */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// /* oi to oj and ni to nj give the axis ranges currently worked with */
// oi = 0;
// oj = 1;
// ni = 0;
// nj = 1;
// while (ni < newRank && oi < oldnd) {
// np = newShapeOf[ni];
// op = olddims[oi];
// while (np != op) {
// if (np < op) {
// /* Misses trailing 1s, these are handled later */
// np *= newShapeOf[nj++];
// } else {
// op *= olddims[oj++];
// }
// }
// /* Check whether the original axes can be combined */
// for (ok = oi; ok < oj - 1; ok++) {
// if (isFOrder) {
// if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// } else {
// /* C order */
// if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// }
// }
// /* Calculate new strides for all axes currently worked with */
// if (isFOrder) {
// newStrides[ni] = oldstrides[oi];
// for (nk = ni + 1; nk < nj; nk++) {
// newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
// }
// } else {
// /* C order */
// newStrides[nj - 1] = oldstrides[oj - 1];
// for (nk = nj - 1; nk > ni; nk--) {
// newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
// }
// }
// ni = nj++;
// oi = oj++;
// }
// if (ni >= 1) {
// last_stride = newStrides[ni - 1];
// } else {
// last_stride = shape::elementWiseStride(oldShape);
// }
// if (isFOrder && ni >= 1) {
// last_stride *= newShapeOf[ni - 1];
// }
// for (nk = ni; nk < newRank; nk++) {
// newStrides[nk] = last_stride;
// }
// target[0] = newRank;
// int cnt = 1;
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newShapeOf[e];
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newStrides[e];
// target[shape::shapeInfoLength(newRank) - 3] = 0;
// target[shape::shapeInfoLength(newRank) - 2] = 0;
// target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
// nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape));
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return true;
// }
// INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo) {
// // PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
// // also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo
// const int newOrder = isFOrder ? 102 : 99;
// const int oldOrder = oldShapeInfo[2 * oldRank + 3];
// newShapeInfo[0] = newRank;
// memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
// Nd4jLong* newStrides = shape::stride(newShapeInfo);
// const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
// const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
// int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
// while (newStart < newRank && oldStart < oldRank) {
// newDim = newShape[newStart];
// oldDim = oldShape[oldStart];
// while (newDim != oldDim)
// if (newDim < oldDim) newDim *= newShape[newStop++];
// else oldDim *= oldShape[oldStop++];
// // ------ Check whether the original axes can be combined ------ //
// for (int i = oldStart; i < oldStop - 1; i++) {
// if(oldShape[i] == 1) { // ignore strides like {...,1,1,...}
// if(oldOrder == 102) ++oldStart;
// continue;
// }
// if(oldOrder == 102 && oldStrides[i + 1] != oldShape[i] * oldStrides[i])
// return false; // not contiguous enough
// if(oldOrder == 99 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1])
// return false; // not contiguous enough
// }
// // ------ Calculate new strides for all axes currently worked with ------ //
// if(isFOrder) {
// newStrides[newStart] = oldStrides[oldStart];
// for (int i = newStart + 1; i < newStop; ++i)
// newStrides[i] = newStrides[i - 1] * newShape[i - 1];
// }
// else {
// newStrides[newStop - 1] = oldStrides[oldStop - 1];
// for (int i = newStop - 1; i > newStart; --i)
// newStrides[i - 1] = newStrides[i] * newShape[i];
// }
// newStart = newStop++;
// oldStart = oldStop++;
// }
// newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order
// newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews
// newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type
// return true;
// }
//////////////////////////////////////////////////////////////////////
// Computes the shapeInfo of a reshape of the array described by oldShapeInfo
// into newShape (newRank extents), writing the result into newShapeInfo.
// Returns false when the old strides are not contiguous enough for a
// copy-free reshape. Order/ews/type trailing slots are copied from
// oldShapeInfo.
INLINEDEF _CUDA_H bool reshapeC(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, Nd4jLong* newShapeInfo) {
// PLEASE NOTE !: reshaping not-permuted (ews=1) array in f order (except insertion/elimination of unities) will definitely cause allocation of new buffer for array elements
// also this function takes into account identical shapes automatically, namely in that case oldShapeInfo is completely copied to newShapeInfo
newShapeInfo[0] = newRank;
memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
Nd4jLong* newStrides = shape::stride(newShapeInfo);
const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
// walk both shapes, grouping runs of axes whose extent products match
while (newStart < newRank && oldStart < oldRank) {
newDim = newShape[newStart];
oldDim = oldShape[oldStart];
while (newDim != oldDim && newDim > 0 && oldDim > 0)
if (newDim < oldDim) newDim *= newShape[newStop++];
else oldDim *= oldShape[oldStop++];
// ------ Check whether the original axes can be combined ------ //
for (int step = 1, i = oldStart; i < oldStop - 1; ++i) {
if(oldShape[i] == 1) // skip unity-dimension and its stride
continue;
while((i + step) < oldRank && oldShape[i + step] == 1)
++step; // skip following unity-dimensions and its strides if such are present
if((i + step) < oldRank && oldStrides[i] != oldShape[i + step] * oldStrides[i + step])
return false; // not contiguous enough
}
// ------ Calculate new strides for the grouped axes: the innermost one
// inherits the old stride, outer ones accumulate c-order products ------ //
newStrides[newStop - 1] = oldStrides[oldStop - 1];
for (int i = newStop - 1; i > newStart; --i)
newStrides[i - 1] = newStrides[i] * newShape[i];
newStart = newStop++;
oldStart = oldStop++;
}
newShapeInfo[2 * newRank + 3] = shape::order(oldShapeInfo); // order
newShapeInfo[2 * newRank + 2] = shape::elementWiseStride(oldShapeInfo); // ews
newShapeInfo[2 * newRank + 1] = shape::type(oldShapeInfo); // type
return true;
}
// Checks whether an array with oldShape can be reshaped to newShapeOf without
// copying data (numpy-style in-place reshape test). Unity dimensions of the
// old shape are ignored; returns false when total lengths differ, the length
// is zero, or the old strides are not contiguous enough to combine axes.
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
int oldnd;
Nd4jLong* oldDims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
Nd4jLong* oldStrides = shape::copyOf(oldRank, shape::stride(oldShape));
int np, op, last_stride;
// NOTE(review): last_stride is declared but never used in this function
int oldStart, oldStop, ok, newStart, newStop, nk;
auto newStrides = new Nd4jLong[newRank];
oldnd = 0;
/*
* Remove axes with dimension 1 from the old array. They have no effect
* but would need special cases since their strides do not matter.
*/
for (oldStart = 0; oldStart < oldRank; oldStart++) {
if (shape::shapeOf(oldShape)[oldStart] != 1) {
oldDims[oldnd] = shape::shapeOf(oldShape)[oldStart];
oldStrides[oldnd] = shape::stride(oldShape)[oldStart];
oldnd++;
}
}
// total element counts of old and new shapes must agree
np = 1;
for (newStart = 0; newStart < newRank; newStart++) {
np *= newShapeOf[newStart];
}
op = 1;
for (oldStart = 0; oldStart < oldnd; oldStart++) {
op *= oldDims[oldStart];
}
if (np != op) {
/* different total sizes; no hope */
delete[] oldDims;
delete[] oldStrides;
delete[] newStrides;
return false;
}
if (np == 0) {
/* the current code does not handle 0-sized arrays, so give up */
delete[] oldDims;
delete[] oldStrides;
delete[] newStrides;
return false;
}
/* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
oldStart = 0;
oldStop = 1;
newStart = 0;
newStop = 1;
while (newStart < newRank && oldStart < oldnd) {
// grow one of the two axis groups until their extent products match
np = newShapeOf[newStart];
op = oldDims[oldStart];
while (np != op) {
if (np < op) {
/* Misses trailing 1s, these are handled later */
np *= newShapeOf[newStop++];
} else {
op *= oldDims[oldStop++];
}
}
/* Check whether the original axes can be combined */
for (ok = oldStart; ok < oldStop - 1; ok++) {
if (isFOrder) {
if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
/* not contiguous enough */
delete[] oldDims;
delete[] oldStrides;
delete[] newStrides;
return false;
}
} else {
/* C order */
if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
/* not contiguous enough */
delete[] oldDims;
delete[] oldStrides;
delete[] newStrides;
return false;
}
}
}
/* Calculate new strides for all axes currently worked with */
if (isFOrder) {
newStrides[newStart] = oldStrides[oldStart];
for (nk = newStart + 1; nk < newStop; nk++) {
newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
}
} else {
/* C order */
newStrides[newStop - 1] = oldStrides[oldStop - 1];
for (nk = newStop - 1; nk > newStart; nk--) {
newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
}
}
newStart = newStop++;
oldStart = oldStop++;
}
// all axis groups combinable -> reshape is possible; the computed strides
// are only needed for the feasibility check and are discarded here
delete[] oldDims;
delete[] oldStrides;
delete[] newStrides;
return true;
}
// Checks consistency of `dimensions` against the array rank: converts
// negative dimensions (-dim -> rank - |dim|), sorts the list and removes
// duplicates, and throws std::runtime_error when the list is empty, longer
// than rank, or contains out-of-range entries. Sorting is also required for
// creating TAD objects in external methods.
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
int dimSize = dimensions.size();
if(dimSize == 0)
throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");
// transform negative dimensions to positive ones: -dim -> rank - |dim|
for(auto& dim : dimensions)
if(dim < 0)
dim += rank;
// sort input array of dimensions, also necessary for creating TAD objects
if (dimSize > 1) {
std::sort(dimensions.begin(), dimensions.end());
// remove duplicates if they are present
dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
}
// check whether the number of dimensions is too big (> rank)
dimSize = dimensions.size();
if(dimSize > rank)
throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");
// check if the min dimension is still negative or the max exceeds rank-1
if(dimensions[0] < 0 || dimensions.back() > (rank-1))
throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !");
}
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// dimsToExclude lists the max-array axes absent from the min array (nullptr
// means the leading dimsLen axes); dimsLen == -1 means "use rank difference".
INLINEDEF _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, int dimsLen) {
const auto maxRank = shape::rank(maxShapeInfo);
const auto minRank = shape::rank(minShapeInfo);
// if(minRank >= maxRank)
// throw std::runtime_error("shape::maxIndToMinInd method: rank of min array should be smaller then rank of max array!");
if(dimsLen == -1)
dimsLen = maxRank - minRank; // if size is not given (= -1) then it is equal to ranks difference
if(maxRank == minRank) {
// equal ranks: excluded axes keep their max coordinate unchanged,
// remaining coordinates wrap modulo the min-array extent
if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1}
for (int i = 0; i < maxRank; ++i) {
if(i < dimsLen)
minIdxs[i] = maxIdxs[i];
else {
if(maxIdxs[i] > minShapeInfo[i + 1])
minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1];
else if(maxIdxs[i] == minShapeInfo[i + 1])
minIdxs[i] = 0;
else
minIdxs[i] = maxIdxs[i];
}
}
}
else {
for (int i = 0, dim = 0; i < maxRank; ++i) {
if(dim < dimsLen && dimsToExclude[dim] == i) {
minIdxs[i] = maxIdxs[i];
++dim;
continue;
}
if(maxIdxs[i] > minShapeInfo[i + 1])
minIdxs[i] = maxIdxs[i] % minShapeInfo[i + 1];
else if(maxIdxs[i] == minShapeInfo[i + 1])
minIdxs[i] = 0;
else
minIdxs[i] = maxIdxs[i];
}
}
}
else {
// different ranks: excluded max axes are dropped entirely, remaining
// max coordinates wrap modulo the min-array extents
if(dimsToExclude == nullptr) { // --> means dimsToExclude == {0,1,2,...,dimsLen-1}
for (int i = 0; i < minRank; ++i) {
if(maxIdxs[i + dimsLen] > minShapeInfo[i + 1])
minIdxs[i] = maxIdxs[i + dimsLen] % minShapeInfo[i + 1];
else if(maxIdxs[i + dimsLen] == minShapeInfo[i + 1])
minIdxs[i] = 0;
else
minIdxs[i] = maxIdxs[i + dimsLen];
}
}
else {
for (int minI = 0, maxI = 0, dim = 0; maxI < maxRank; ++maxI) {
if(dim < dimsLen && dimsToExclude[dim] == maxI) {
++dim;
continue;
}
if(maxIdxs[maxI] == minShapeInfo[minI + 1])
minIdxs[minI] = 0;
else if(maxIdxs[maxI] > minShapeInfo[minI + 1])
minIdxs[minI] = maxIdxs[maxI] % minShapeInfo[minI + 1];
else
minIdxs[minI] = maxIdxs[maxI];
++minI;
}
}
}
}
//////////////////////////////////////////////////////////////////////
// Converts a linear index of the max (outer) array into the linear index of
// the corresponding element of the min (sub) array.
INLINEDEF _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) {
    Nd4jLong maxCoords[MAX_RANK];
    Nd4jLong minCoords[MAX_RANK];

    // decompose maxIdx into per-dimension coordinates of the max array
    shape::index2coords(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo) + 1, const_cast<Nd4jLong&>(maxIdx), maxCoords, shape::order(maxShapeInfo));
    // project them onto the min array
    maxIndToMinInd(maxCoords, minCoords, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen);

    return coords2index(shape::rank(minShapeInfo), minShapeInfo + 1, minCoords);
}
//////////////////////////////////////////////////////////////////////
// Same mapping as subArrayIndex, but returns the buffer OFFSET inside the
// min array (strides applied) instead of the linear index.
INLINEDEF _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude, const int dimsLen) {
    Nd4jLong maxCoords[MAX_RANK];
    Nd4jLong minCoords[MAX_RANK];
    const int minRank = shape::rank(minShapeInfo);

    // decompose maxIdx into per-dimension coordinates of the max array
    shape::index2coords(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo) + 1, const_cast<Nd4jLong&>(maxIdx), maxCoords, shape::order(maxShapeInfo));
    // project them onto the min array
    maxIndToMinInd(maxCoords, minCoords, maxShapeInfo, minShapeInfo, dimsToExclude, dimsLen);

    return getOffset(0, minShapeInfo + 1, minShapeInfo + minRank + 1, minCoords, minRank);
}
//////////////////////////////////////////////////////////////////////
// Collects the offsets of ALL elements of the max (outer) array that map
// onto the min-array element with linear index minIdx, writing them into
// maxOffsets, and returns how many were produced. dimsToExclude == nullptr
// means the leading (rankMax - rankMin) axes are the excluded ones.
INLINEDEF _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
const auto rankMin = shape::rank(minShapeInfo);
const auto rankMax = shape::rank(maxShapeInfo);
// if(rankMin >= rankMax)
// throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
// if(rankMax > MAX_RANK/2)
// throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
// one stack buffer split into two halves (hence the MAX_RANK/2 limit above)
Nd4jLong buffer[MAX_RANK];
Nd4jLong* indices = buffer;
Nd4jLong* increment = buffer + MAX_RANK/2;
int N, minI, maxI;
// calculate min per-dim-indices which corresponds to absolute minIdx index
shape::index2coords(rankMin, minShapeInfo + 1, minIdx, indices, order(minShapeInfo));
// transform storage indices to contain per-dim max indices, purpose - memory saving
// fill increment array as well
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI];
}
for(maxI = 0; maxI < diff; ++maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
}
}
else {
for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
if(N >= 0 && dimsToExclude[N] == maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
--N;
}
else {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI--];
}
}
}
maxI = rankMax-1;
N = 0;
int step;
maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
// nested loops - producing of absolute indices for max array
while(maxI >= 0) {
if(increment[maxI] != 0) {
indices[maxI] += increment[maxI];
if(indices[maxI] >= maxShapeInfo[maxI+1]) {
indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
step = -1;
}
else {
maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
step = rankMax - 1 - maxI;
}
}
else if(maxI == rankMax - 1)
step = -1;
maxI += step;
}
return N;
}
//////////////////////////////////////////////////////////////////////
// Same enumeration as outerArrayOffsets, but writes the linear INDICES of
// the max-array elements that map onto min-array element minIdx (via
// coords2index) instead of their buffer offsets. Returns the count.
INLINEDEF _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
const auto rankMin = shape::rank(minShapeInfo);
const auto rankMax = shape::rank(maxShapeInfo);
// if(rankMin >= rankMax)
// throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
// if(rankMax > MAX_RANK/2)
// throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
// one stack buffer split into two halves (hence the MAX_RANK/2 limit above)
Nd4jLong buffer[MAX_RANK];
Nd4jLong* indices = buffer;
Nd4jLong* increment = buffer + MAX_RANK/2;
int N, minI, maxI;
// calculate min per-dim-indices which corresponds to absolute minIdx index
shape::index2coords(rankMin, minShapeInfo + 1, minIdx, indices, order(minShapeInfo));
// transform storage indices to contain per-dim max indices, purpose - memory saving
// fill increment array as well
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI];
}
for(maxI = 0; maxI < diff; ++maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
}
}
else {
for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
if(N >= 0 && dimsToExclude[N] == maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
--N;
}
else {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI--];
}
}
}
maxI = rankMax-1;
N = 0;
int step;
maxIdxs[N++] = coords2index(rankMax, maxShapeInfo + 1, indices);
// nested loops - producing of absolute indices for max array
while(maxI >= 0) {
if(increment[maxI] != 0) {
indices[maxI] += increment[maxI];
if(indices[maxI] >= maxShapeInfo[maxI+1]) {
indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
step = -1;
}
else {
maxIdxs[N++] = coords2index(rankMax, maxShapeInfo + 1, indices);
step = rankMax - 1 - maxI;
}
}
else if(maxI == rankMax - 1)
step = -1;
maxI += step;
}
return N;
}
// Fills `buffer` with a legacy rank-2 [1,1] scalar shape descriptor in the
// requested order, then stamps the data type via ArrayOptions.
//
// Fix: slot 5 (extras) was left uninitialized; the sibling
// createScalarShapeInfo(Nd4jLong*) explicitly writes 0 there, so do the same
// here to avoid garbage in that slot before setDataType touches it.
INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) {
    buffer[0] = 2;            // rank
    buffer[1] = 1;            // shape
    buffer[2] = 1;
    buffer[3] = 1;            // strides
    buffer[4] = 1;
    buffer[5] = 0;            // extras slot, zeroed (was uninitialized)
    buffer[6] = 1;            // ews
    buffer[7] = (int)order;

    nd4j::ArrayOptions::setDataType(buffer, dataType);
}
// Element-wise cast-copy of `length` items from `from` into `to`.
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
    for (Nd4jLong i = 0; i < length; i++) {
        to[i] = static_cast<T2>(from[i]);
    }
};
//////////////////////////////////////////////////////////////////////
// Fills `offsets` with the buffer offset of every element of the array
// described by shapeInfo, enumerated in the requested `order`.
//
// Fix: the fast path used "offsets[e++] = offsets[e - 1] + ews", which
// modifies `e` and reads it in the same expression — unsequenced (UB before
// C++17). Rewritten with a plain indexed loop; behavior is otherwise
// identical.
INLINEDEF void calcOffsets(const Nd4jLong* shapeInfo, Nd4jLong* offsets, const char order) {

    // firstly consider simple case when ews > 0: elements are equidistant
    const Nd4jLong ews = shape::elementWiseStride(shapeInfo);

    if (ews > 0) {

        // set offset for first sub-array, it is equal to zero always
        offsets[0] = 0;

        // count non-unity dimensions to detect a common vector, for which the
        // enumeration order is irrelevant
        Nd4jLong nonUnities = 0;
        if (order != shape::order(shapeInfo))
            for (int i = 1; i <= shape::rank(shapeInfo); ++i)
                if (shapeInfo[i] != 1)
                    ++nonUnities;

        if (order == shape::order(shapeInfo) || nonUnities == 1) { // == 1 means common vector
            const Nd4jLong len = shape::length(shapeInfo);
            for (Nd4jLong e = 1; e < len; ++e)
                offsets[e] = offsets[e - 1] + ews;
            return;
        }
    }

    // general (strided/permuted) case
    shape::calcOffsets(shape::rank(shapeInfo), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), shape::stride(const_cast<Nd4jLong*>(shapeInfo)), offsets, order);
}
//////////////////////////////////////////////////////////////////////
// Fills `offsets` with the buffer offset of every element of an array with
// the given shape/strides, enumerated in `order` ('c' or anything else for
// 'f'). Unity dimensions are skipped, since they contribute nothing.
//
// Fix: both branches used "offsets[i++] = offsets[i - 1] + strides[j]",
// which modifies `i` and reads it in the same expression — unsequenced
// (UB before C++17). The write and the increment are now separate
// statements; behavior is otherwise identical.
INLINEDEF void calcOffsets(const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* offsets, const char order) {

    // set offset for first sub-array, it is equal to zero always
    offsets[0] = 0;

    Nd4jLong* idx = new Nd4jLong[rank];          // current coordinate per dimension
    Nd4jLong* offsetPerDim = new Nd4jLong[rank]; // total span of each dimension
    memset(idx, 0, sizeof(Nd4jLong) * rank);

    PRAGMA_OMP_SIMD
    for (int k = 0; k < rank; ++k)
        offsetPerDim[k] = (shape[k] - 1) * strides[k];

    Nd4jLong init = 0, i = 1;

    // nested-loop emulation: walk the innermost dimension in a tight loop and
    // carry into outer dimensions when it is exhausted
    if (order == 'c') {

        Nd4jLong rankMinusOne = rank - 1, j = rankMinusOne;

        while (j >= 0) {
            if (shape[j] == 1) { --j; continue; } // ignore dimensions equal to unity

            if (j == rankMinusOne) { // last (innermost) dimension
                for (int l = 1; l < shape[j]; ++l) {
                    offsets[i] = offsets[i - 1] + strides[j];
                    ++i;
                }
                --j;
            }
            else if (idx[j] < shape[j] - 1) {
                init += strides[j];
                offsets[i] = init;
                ++i;
                ++idx[j];
                j = rankMinusOne;
            }
            else {
                init -= offsetPerDim[j];
                idx[j--] = 0;
            }
        }
    }
    else {

        Nd4jLong j = 0;

        while (j < rank) {
            if (shape[j] == 1) { ++j; continue; } // ignore dimensions equal to unity

            if (j == 0) { // first dimension is innermost for 'f' order
                for (int l = 1; l < shape[j]; ++l) {
                    offsets[i] = offsets[i - 1] + strides[j];
                    ++i;
                }
                ++j;
            }
            else if (idx[j] < shape[j] - 1) {
                init += strides[j];
                offsets[i] = init;
                ++i;
                ++idx[j];
                j = 0;
            }
            else {
                init -= offsetPerDim[j];
                idx[j++] = 0;
            }
        }
    }

    delete[] idx;
    delete[] offsetPerDim;
}
//////////////////////////////////////////////////////////////////////
// Evaluates and stores the element-wise stride (ews) of the array described
// by shapeInfo: 1 for dense layouts matching the declared order, the single
// non-unity stride for common vectors, 0 when no constant element-to-element
// stride exists. Pass len == -1 to have the length computed here.
INLINEDEF void _CUDA_HD setEws(Nd4jLong* shapeInfo, Nd4jLong len) {
const int rank = shape::rank(shapeInfo);
const Nd4jLong* shape = shape::shapeOf(shapeInfo);
const Nd4jLong* strides = shape::stride(shapeInfo);
const char order = shape::order(shapeInfo);
Nd4jLong* ews = shape::ews(shapeInfo);
if(len == -1) // calculate array length if it is not given
len = shape::length(shapeInfo);
if(len <= 1) { // empty, scalar or unity-vector case
*ews = 1;
return;
}
int nonUnityDim(0);
if(shape::isCommonVector(shapeInfo, nonUnityDim)) {
*ews = strides[nonUnityDim];
return;
}
// check last(c)/first(f) dimension: its stride must be 1 unless the
// dimension itself is unity
if((order == 'c' && shape[rank - 1] != 1 && strides[rank - 1] != 1) || (order == 'f' && shape[0] != 1 && strides[0] != 1)) {
*ews = 0;
return;
}
// verify the remaining strides match the dense strides for the declared
// order, ignoring unity dimensions
Nd4jLong correctStride = 1;
if(order == 'c') {
for (int i = rank - 2; i >= 0 ; i--) {
correctStride *= shape[i + 1];
if(shape[i] == 1)
continue;
if(correctStride != strides[i]) {
*ews = 0;
return;
}
}
}
else {
for (int i = 1; i < rank; ++i) {
correctStride *= shape[i - 1];
if(shape[i] == 1)
continue;
if(correctStride != strides[i]) {
*ews = 0;
return;
}
}
}
*ews = 1;
}
//////////////////////////////////////////////////////////////////////
// Like setEws, but additionally rewrites the order flag: when strides turn
// out to be dense in c-order the flag becomes 'c' (99), when dense in
// f-order it becomes 'f' (102). If neither, ews is set to 0 and the order
// flag is left untouched.
INLINEDEF _CUDA_HD void setOrderAndEws(Nd4jLong* shapeInfo, Nd4jLong len) {
const int rank = shape::rank(shapeInfo);
const Nd4jLong* shape = shape::shapeOf(shapeInfo);
const Nd4jLong* strides = shape::stride(shapeInfo);
const char order = shape::order(shapeInfo);
Nd4jLong* ews = shape::ews(shapeInfo);
if(len == -1) // calculate array length if it is not given
len = shape::length(shapeInfo);
if(len <= 1) { // empty, scalar or unity-vector case
*ews = 1;
return;
}
int nonUnityDim(0);
if(shape::isCommonVector(shapeInfo, nonUnityDim)) { // in this case we don't change order
*ews = strides[nonUnityDim];
return;
}
// check if strides are contiguous in respect to c-order
// firstly check last stride, it should be equal to 1
if (strides[rank - 1] == 1 || shape[rank - 1] == 1) { // last dimension is ok, go on through the rest dimensions in reverse order
Nd4jLong correctStride = 1;
bool cContiguous = true;
for (int i = rank - 2; i >= 0 ; i--) {
correctStride *= shape[i + 1];
if(shape[i] == 1)
continue;
if(correctStride != strides[i]) {
cContiguous = false;
break;
}
}
if(cContiguous) {
*ews = 1;
shapeInfo[shape::shapeInfoLength(rank) - 1] = 99;
return;
}
}
// now check if strides are contiguous in respect to f-order
// firstly check first stride, it should be equal to 1
if(strides[0] == 1 || shape[0] == 1) { // first dimension is ok, go on through the rest dimensions
Nd4jLong correctStride = 1;
bool fContiguous = true;
for (int i = 1; i < rank; ++i) {
correctStride *= shape[i - 1];
if(shape[i] == 1)
continue;
if(correctStride != strides[i]) {
fContiguous = false;
break;
}
}
if(fContiguous) {
*ews = 1;
shapeInfo[shape::shapeInfoLength(rank) - 1] = 102;
return;
}
}
*ews = 0;
// if both cContiguous and fContiguous are false then order is preserved
}
//////////////////////////////////////////////////////////////////////
// Builds the shapeInfo shared by all sub-arrays obtained by fixing the axes
// listed in dimsToExclude, and fills subArrOffsets with the offset of each
// of the numOfSubArrs sub-arrays. When keepUnitiesInShape is false, the
// fixed axes (set to extent 1) are squeezed out of the resulting shapeInfo
// via reshapeC.
INLINEDEF _CUDA_HD void calcSubArrShapeAndOffsets(const Nd4jLong* wholeShapeInfo, const Nd4jLong numOfSubArrs, const int dimsSize, const int* dimsToExclude, Nd4jLong* subArrShapeInfo, Nd4jLong* subArrOffsets, bool keepUnitiesInShape) {
const int rank = shape::rank(wholeShapeInfo);
if(dimsSize == rank || dimsSize == 0) { // means there is one sub-array and it coincides with whole array, return copy of wholeShapeInfo and one zero offset in this case
memcpy(subArrShapeInfo, wholeShapeInfo, shape::shapeInfoLength(rank) * sizeof(Nd4jLong));
*subArrOffsets = 0;
return;
}
Nd4jLong *outShapeInfo = new Nd4jLong[shape::shapeInfoLength(wholeShapeInfo)];
memcpy(outShapeInfo, wholeShapeInfo, shape::shapeInfoByteLength(wholeShapeInfo));
Nd4jLong* shape = new Nd4jLong[dimsSize];
Nd4jLong* strides = new Nd4jLong[dimsSize];
const int subArrRank = keepUnitiesInShape ? rank : rank - dimsSize;
Nd4jLong* shapeNoUnities = nullptr;
if(!keepUnitiesInShape)
shapeNoUnities = new Nd4jLong[subArrRank];
// split axes: excluded ones go into shape/strides (used to enumerate the
// offsets), the rest remain in outShapeInfo with excluded extents set to 1
Nd4jLong subArrLen = 1;
for(int k = subArrRank - 1, j = dimsSize - 1, i = rank - 1; i >= 0; --i) {
if(j >= 0 && i == dimsToExclude[j]) {
strides[j] = shape::stride(outShapeInfo)[i];
shape[j--] = shape::shapeOf(outShapeInfo)[i];
shape::shapeOf(outShapeInfo)[i] = 1;
}
else {
subArrLen *= shape::shapeOf(outShapeInfo)[i];
if(!keepUnitiesInShape)
shapeNoUnities[k--] = shape::shapeOf(outShapeInfo)[i];
}
}
// evaluate ews
shape::setEws(outShapeInfo, subArrLen);
// calculation of sub-array offsets (subArrOffsets)
shape::calcOffsets(dimsSize, shape, strides, subArrOffsets);
// remove unities from outShapeInfo if required
if(!keepUnitiesInShape) {
shape::reshapeC(rank, outShapeInfo, subArrRank, shapeNoUnities, subArrShapeInfo);
delete []shapeNoUnities;
}
else
memcpy(subArrShapeInfo, outShapeInfo, shape::shapeInfoLength(subArrRank) * sizeof(Nd4jLong));
delete []strides;
delete []shape;
delete []outShapeInfo;
}
//////////////////////////////////////////////////////////////////////
// index2coords (convenience overload): derives the total array length from
// shape, then delegates to the overload that takes arrLen explicitly.
INLINEDEF void _CUDA_HD index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *coords, const char order) {
    Nd4jLong arrLen = shape::prodLong(shape, rank);
    shape::index2coords(rank, shape, index, arrLen, coords, order);
}
// index2coords: converts a flat element index into per-dimension coordinates.
// For order 'c' the dimensions are consumed from first (slowest varying) to
// last; otherwise from last to first. Dimensions of extent 1 always get
// coordinate 0. arrLen must be the product of all extents.
INLINEDEF void _CUDA_HD index2coords(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *coords, const char order) {
    const bool cOrder = (order == 'c');
    // iterate dims from slowest-varying to fastest, per storage order
    const int first = cOrder ? 0 : rank - 1;
    const int last  = cOrder ? rank : -1;
    const int step  = cOrder ? 1 : -1;
    for (int dim = first; dim != last; dim += step) {
        arrLen /= shape[dim]; // stride (in elements) of this dimension's coordinate
        if (arrLen > 0 && shape[dim] > 1) {
            coords[dim] = index / arrLen;
            index %= arrLen;
        }
        else {
            coords[dim] = 0;
        }
    }
}
//////////////////////////////////////////////////////////////////////
// calcOffsets (3-array version): prepares per-element buffer offsets so the
// three arrays described by x/y/zShapeInfo can be traversed in lock-step.
// A returned nullptr means "this array has ews == 1 in a compatible order, no
// offset table is needed". Independent tables are built in parallel via OpenMP
// sections.
// NOTE(review): when shapeInfos share identical shape+strides the returned
// pointers alias ONE new[] allocation — the caller must free it only once.
// The trailing `order` parameter is never read in this body — confirm intended.
INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const Nd4jLong* zShapeInfo, Nd4jLong*& zOffsets, const char order) {
    // we assume all array have same length
    const Nd4jLong len = shape::length(xShapeInfo);
    const Nd4jLong xEws = shape::elementWiseStride(xShapeInfo);
    const Nd4jLong yEws = shape::elementWiseStride(yShapeInfo);
    const Nd4jLong zEws = shape::elementWiseStride(zShapeInfo);
    const char xOrder = shape::order(xShapeInfo);
    const char yOrder = shape::order(yShapeInfo);
    const char zOrder = shape::order(zShapeInfo);
    const bool shapesSame = shape::shapeEquals(xShapeInfo, yShapeInfo, zShapeInfo);
    // all three contiguous in a compatible order: no offset tables at all
    if (xEws == 1 && yEws == 1 && zEws == 1 && xOrder == yOrder && xOrder == zOrder && (xOrder == 'c' || shapesSame)) {
        xOffsets = yOffsets = zOffsets = nullptr;
    }
    // exactly one array needs a table, built in the order of the contiguous pair
    else if(xEws == 1 && yEws == 1 && xOrder == yOrder && (xOrder == 'c' || shape::shapeEquals(xShapeInfo, yShapeInfo))) {
        xOffsets = yOffsets = nullptr;
        zOffsets = new Nd4jLong[len];
        shape::calcOffsets(zShapeInfo, zOffsets, xOrder);
    }
    else if(xEws == 1 && zEws == 1 && xOrder == zOrder && (xOrder == 'c' || shape::shapeEquals(xShapeInfo, zShapeInfo))) {
        xOffsets = zOffsets = nullptr;
        yOffsets = new Nd4jLong[len];
        shape::calcOffsets(yShapeInfo, yOffsets, xOrder);
    }
    else if(yEws == 1 && zEws == 1 && yOrder == zOrder && (yOrder == 'c' || shape::shapeEquals(yShapeInfo, zShapeInfo))) {
        yOffsets = zOffsets = nullptr;
        xOffsets = new Nd4jLong[len];
        shape::calcOffsets(xShapeInfo, xOffsets, yOrder);
    }
    // one contiguous array: build the other two tables concurrently
    else if(xEws == 1) {
        xOffsets = nullptr;
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                yOffsets = new Nd4jLong[len];
                shape::calcOffsets(yShapeInfo, yOffsets, xOrder);
            }
            #pragma omp section
            {
                zOffsets = new Nd4jLong[len];
                shape::calcOffsets(zShapeInfo, zOffsets, xOrder);
            }
        }
    }
    else if(yEws == 1) {
        yOffsets = nullptr;
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                xOffsets = new Nd4jLong[len];
                shape::calcOffsets(xShapeInfo, xOffsets, yOrder);
            }
            #pragma omp section
            {
                zOffsets = new Nd4jLong[len];
                shape::calcOffsets(zShapeInfo, zOffsets, yOrder);
            }
        }
    }
    else if(zEws == 1) {
        zOffsets = nullptr;
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                xOffsets = new Nd4jLong[len];
                shape::calcOffsets(xShapeInfo, xOffsets, zOrder);
            }
            #pragma omp section
            {
                yOffsets = new Nd4jLong[len];
                shape::calcOffsets(yShapeInfo, yOffsets, zOrder);
            }
        }
    }
    // none contiguous: share a single table wherever shape+strides coincide
    else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo)) {
        xOffsets = new Nd4jLong[len];
        shape::calcOffsets(xShapeInfo, xOffsets);
        yOffsets = zOffsets = xOffsets; // aliased: one allocation for all three
    }
    else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo)) {
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                xOffsets = new Nd4jLong[len];
                shape::calcOffsets(xShapeInfo, xOffsets);
            }
            #pragma omp section
            {
                zOffsets = new Nd4jLong[len];
                shape::calcOffsets(zShapeInfo, zOffsets);
            }
        }
        yOffsets = xOffsets; // aliased
    }
    else if(shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo)) {
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                xOffsets = new Nd4jLong[len];
                shape::calcOffsets(xShapeInfo, xOffsets);
            }
            #pragma omp section
            {
                yOffsets = new Nd4jLong[len];
                shape::calcOffsets(yShapeInfo, yOffsets);
            }
        }
        zOffsets = xOffsets; // aliased
    }
    else {
        // fully independent: three tables built concurrently
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                xOffsets = new Nd4jLong[len];
                shape::calcOffsets(xShapeInfo, xOffsets);
            }
            #pragma omp section
            {
                yOffsets = new Nd4jLong[len];
                shape::calcOffsets(yShapeInfo, yOffsets);
            }
            #pragma omp section
            {
                zOffsets = new Nd4jLong[len];
                shape::calcOffsets(zShapeInfo, zOffsets);
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////
// calcOffsets (2-array version): same idea as the 3-array overload above but
// for a pair of arrays. nullptr output means "contiguous, no table needed".
// NOTE(review): in the same-shape-and-strides case both outputs alias one
// new[] allocation — free only once. `order` parameter is never read here.
INLINEDEF _CUDA_HD void calcOffsets(const Nd4jLong *xShapeInfo, Nd4jLong*& xOffsets, const Nd4jLong *yShapeInfo, Nd4jLong*& yOffsets, const char order) {
    // we assume all array have same length
    const Nd4jLong len = shape::length(xShapeInfo);
    const Nd4jLong xEws = shape::elementWiseStride(xShapeInfo);
    const Nd4jLong yEws = shape::elementWiseStride(yShapeInfo);
    const char xOrder = shape::order(xShapeInfo);
    const char yOrder = shape::order(yShapeInfo);
    const bool shapesSame = shape::shapeEquals(xShapeInfo, yShapeInfo);
    // both contiguous in a compatible order: no tables
    if (xEws == 1 && yEws == 1 && xOrder == yOrder && (xOrder == 'c' || shapesSame)) {
        xOffsets = yOffsets = nullptr;
    }
    else if(xEws == 1) {
        xOffsets = nullptr;
        yOffsets = new Nd4jLong[len];
        shape::calcOffsets(yShapeInfo, yOffsets, xOrder);
    }
    else if(yEws == 1) {
        yOffsets = nullptr;
        xOffsets = new Nd4jLong[len];
        shape::calcOffsets(xShapeInfo, xOffsets, yOrder);
    }
    else if(shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo)) {
        xOffsets = new Nd4jLong[len];
        shape::calcOffsets(xShapeInfo, xOffsets);
        yOffsets = xOffsets; // aliased: single allocation shared by both
    }
    else {
        // independent layouts: build both tables concurrently
        #pragma omp parallel sections
        {
            #pragma omp section
            {
                xOffsets = new Nd4jLong[len];
                shape::calcOffsets(xShapeInfo, xOffsets);
            }
            #pragma omp section
            {
                yOffsets = new Nd4jLong[len];
                shape::calcOffsets(yShapeInfo, yOffsets);
            }
        }
    }
}
}
#endif /* SHAPE_H_ */ |
GB_binop__isge_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_int64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int64)
// A*D function (colscale): GB (_AxD__isge_int64)
// D*A function (rowscale): GB (_DxB__isge_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int64)
// C=scalar+B GB (_bind1st__isge_int64)
// C=scalar+B' GB (_bind1st_tran__isge_int64)
// C=A+scalar GB (_bind2nd__isge_int64)
// C=A'+scalar GB (_bind2nd_tran__isge_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT64 || GxB_NO_ISGE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense; the computation is supplied by the
// included template. Returns GrB_NO_VALUE when this operator is disabled at
// compile time (GB_DISABLE). Auto-generated kernel for the ISGE_INT64 binop.
GrB_Info GB (_Cdense_ewise3_noaccum__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// precomputed slicing of B (B_ek_slicing) over B_ntasks tasks / B_nthreads
// threads. Body comes from the subassign_23 template.
GrB_Info GB (_Cdense_accumB__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense
// matrix. Body comes from the subassign_22 template.
GrB_Info GB (_Cdense_accumb__isge_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return inside the block above always
    // executes first (harmless artifact of the code generator).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D. Cx is C's value
// array; the loop body comes from the colscale meta template.
GrB_Info GB (_AxD__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D. Cx is C's value
// array; the loop body comes from the rowscale meta template.
GrB_Info GB (_DxB__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B. Per-matrix slicing workspaces are declared
// here and released by GB_FREE_WORK; the computation comes from GB_add_template.
GrB_Info GB (_AaddB__isge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, general case; the whole
// computation comes from the emult_01 meta template.
GrB_Info GB (_AemultB_01__isge_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for this operator (see the macro above), so
// only the non-flipped template path is compiled.
GrB_Info GB (_AemultB_02__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; body comes from the emult_03 template.
GrB_Info GB (_AemultB_03__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult, bitmap case: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap;
// body comes from the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__isge_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISGE binop with the scalar bound as the first
// argument: Cx [p] = (x >= Bx [p]), stored as int64_t 0/1. Entries absent from
// the bitmap Bb are skipped. Cx and Bx may alias.
GrB_Info GB (_bind1st__isge_int64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISGE binop with the scalar bound as the second
// argument: Cx [p] = (Ax [p] >= y), stored as int64_t 0/1. Entries absent from
// the bitmap Ab are skipped. Cx and Ax may alias.
GrB_Info GB (_bind2nd__isge_int64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = (x >= aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the ISGE binop with the scalar as the
// first argument; the transpose machinery comes from GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isge_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = (aij >= y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the ISGE binop with the scalar as the
// second argument; the transpose machinery comes from GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isge_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
NLmean_propag1dir_sspacing3_tspacing8_sim12_acc12_neighbor5_tau0100.c | /*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing3.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag1dir_sspacing3_tspacing8_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 3
#define SCALE_FACTOR_TIME 8
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS N_HR/SCALE_FACTOR_TIME - 1 /* #(1:SCALE_FACTOR_TIME:N_HR) - 1*/
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
 * get_onesnap: copy the inclusive slice arr1[id_start .. id_end] into the
 * beginning of arr2 (arr2 must hold at least id_end - id_start + 1 doubles).
 */
void get_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
    const int count = id_end - id_start + 1;
    for (int k = 0; k < count; k++)
        arr2[k] = arr1[id_start + k];
}
/*
 * put_onesnap: write arr2 back into the inclusive slice arr1[id_start .. id_end]
 * (the inverse of get_onesnap).
 */
void put_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
    const int count = id_end - id_start + 1;
    for (int k = 0; k < count; k++)
        arr1[id_start + k] = arr2[k];
}
/*
 * norm_by_weight: element-wise division x[k] /= W[k] for k in [0, dim).
 * No guard against W[k] == 0 (same contract as before).
 */
void norm_by_weight(int dim, double *x, double *W)
{
    for (int k = 0; k < dim; k++)
        x[k] /= W[k];
}
/* add_mat: element-wise sum[k] = x1[k] + x2[k] for k in [0, dim). */
void add_mat(int dim, double *sum, double *x1, double *x2)
{
    double *end = sum + dim;
    while (sum < end)
        *sum++ = *x1++ + *x2++;
}
/* initialize: fill x[0 .. dim-1] with the constant val. */
void initialize(int dim, double *x, double val)
{
    for (double *p = x, *end = x + dim; p < end; ++p)
        *p = val;
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
 * create_netcdf: create the netcdf file [filename] containing [num_vars]
 * NC_FLOAT variables named [varname], each with NDIMS (= 4) dimensions
 * (Nt unlimited, Nx, Ny, Nz), where Nx = Ny = Nz = N_HR.
 */
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
    int ncid_wr, retval_wr;
    int vel_varid_wr;
    int Nt, Nx, Ny, Nz;
    int dimids[NDIMS];
    /* Create the file. */
    if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
        ERR(retval_wr);
    /* Define the dimensions. The record dimension is defined to have
     * unlimited length - it can grow as needed.*/
    /* BUG FIX: "Nx" was never defined via nc_def_dim, yet dimids[1] = Nx was
     * passed to nc_def_var below — a read of an uninitialized variable
     * (undefined behavior). Define it like Ny and Nz. */
    if ((retval_wr = nc_def_dim(ncid_wr, "Nx", N_HR, &Nx)))
        ERR(retval_wr);
    if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
        ERR(retval_wr);
    if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
        ERR(retval_wr);
    if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
        ERR(retval_wr);
    /* Define the netCDF variables for the data: (Nt, Nx, Ny, Nz). */
    dimids[0] = Nt;
    dimids[1] = Nx;
    dimids[2] = Ny;
    dimids[3] = Nz;
    for (int i = 0; i<num_vars; i++)
    {
        if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
            ERR(retval_wr);
    }
    /* End define mode (SHOULD NOT FORGET THIS!). */
    if ((retval_wr = nc_enddef(ncid_wr)))
        ERR(retval_wr);
    /* Close the file. */
    if ((retval_wr = nc_close(ncid_wr)))
        ERR(retval_wr);
    printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
 * write_netcdf: write the snapshots in [snaps] into variable [varname] of
 * [filename], at the hyperslab described by netCDF [start]/[count] vectors.
 */
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid_wr, retval_wr;
    int vel_varid_wr;
    /* Open the file. NC_WRITE tells netCDF we want write access to the file. */
    if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr)))
        ERR(retval_wr);
    /* Get variable id. (FIX: removed a stray empty statement `;;` here.) */
    if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr)))
        ERR(retval_wr);
    /* Put variable*/
    if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, &snaps[0])))
        ERR(retval_wr);
    /* Close the file. */
    if ((retval_wr = nc_close(ncid_wr)))
        ERR(retval_wr);
    printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
 * read_netcdf: read from [filename], variable [varname], the hyperslab
 * described by netCDF [start]/[count] vectors into [snaps]
 * (snaps must be preallocated to hold the product of the count extents).
 */
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
    int ncid_rd, retval_rd;
    int vel_varid_rd;
    /* ******** PREPARE TO READ ************* */
    /* Open the file. NC_NOWRITE tells netCDF we want read-only access to the file.*/
    if ((retval_rd = nc_open(filename, NC_NOWRITE, &ncid_rd)))
        ERR(retval_rd);
    /* Get the varids of the velocity in netCDF */
    if ((retval_rd = nc_inq_varid(ncid_rd, varname, &vel_varid_rd)))
        ERR(retval_rd);
    /* Read the hyperslab, converting to double. */
    if ((retval_rd = nc_get_vara_double(ncid_rd, vel_varid_rd, start, count, &snaps[0])))
        ERR(retval_rd);
    /* Close the file, freeing all resources. */
    if ((retval_rd = nc_close(ncid_rd)))
        ERR(retval_rd);
    printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
 * generate_grids: precompute, for every estimation point (i, j) of the
 * N_HR x N_HR plane, every neighbor offset and every similarity-patch offset,
 * the wrapped y/z indices of the corresponding sample:
 *   gridpatches_y/z[i][j][neighbor_id][sim_id]  (flattened row-major)
 * Also fills acc_ids with the sim-grid indices of the central
 * ACC_FULLSIZE x ACC_FULLSIZE accumulation window inside the similarity patch.
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
    int neighbor_id, sim_id; // NOTE(review): unused — shadowed by the loop-local declarations below
    // per-sample offsets within the neighbor search window, centered at 0
    int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
    for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
    {
        for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
        {
            gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
            gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
        }
    }
    // per-sample offsets within the similarity patch, centered at 0
    int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
    {
        for (int q = 0; q < SIM_FULLSIZE; q++)
        {
            gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
            gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
        }
    }
    // flat index of each (p, q) cell of the similarity patch
    int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
    for (int p = 0; p < SIM_FULLSIZE; p++)
        for (int q = 0; q < SIM_FULLSIZE; q++)
            grid_sim[p][q] = p * SIM_FULLSIZE + q;
    // indices of the centered accumulation window inside the similarity patch
    for (int p = 0; p < ACC_FULLSIZE; p++)
        for (int q = 0; q < ACC_FULLSIZE; q++)
            acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];
    int valy, valz;
    long int grid_id;
    for (int i = 0; i < N_HR; i++)
    {
        for (int j = 0; j < N_HR; j++)
        {
            for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
            {
                for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
                {
                    grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                            + j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                            + neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
                    valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
                    valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
                    // NOTE(review): out-of-range indices are wrapped with period
                    // (N_HR - 1), i.e. points 0 and N_HR-1 are treated as the same
                    // periodic sample — confirm this matches the data's periodicity.
                    if (valy < 0)
                        gridpatches_y[grid_id] = (N_HR - 1) + valy;
                    else if (valy > (N_HR - 1))
                        gridpatches_y[grid_id] = valy - (N_HR - 1);
                    else
                        gridpatches_y[grid_id] = valy;
                    if (valz < 0)
                        gridpatches_z[grid_id] = (N_HR - 1) + valz;
                    else if (valz > (N_HR - 1))
                        gridpatches_z[grid_id] = valz - (N_HR - 1);
                    else
                        gridpatches_z[grid_id] = valz;
                }
            }
        }
    }
    //printf("\n gridpatches_z: %i \n", gridpatches_y[0]);
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
* estimate_distance: estimate the distances between ref patch and moving patches (prev and after)
* patches are of fixed size (2*SIM_HAFTSIZE+1) x (2*SIM_HAFTSIZE+1)
* reference patch are centered at [center_ref_idy, center_ref_idz]
* moving patches are centered at [center_moving_idy, center_moving_idz]
* dist_all contain 2 elements: distances to moving patches in the prev and after plane
* x_ref: reference plane
* x_prev: previous plane
* x_after: plane after
* ref_ids_y(z): indices of points in reference patch
* moving_ids_y(z): indices of points in moving patch
*/
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
/*
 * NLmean: one non-local-means propagation step over an N_HR x N_HR plane.
 * For each estimation point and each neighbor position, it compares the
 * reference patch in x_ref against the shifted patch in x_moving (SSD over the
 * similarity window, normalized by patch size), turns the distance into a
 * Gaussian weight w = exp(-d / (2*TAU^2)), and accumulates w * x_fusion into
 * x_NLM with the matching weight into weight_NLM over the accumulation window.
 * The caller normalizes x_NLM by weight_NLM afterwards.
 * gridy/gridz are the precomputed wrapped indices from generate_grids;
 * accids selects the accumulation cells within the similarity patch.
 * NOTE(review): the omp parallel-for is over est_idy only, but iterations with
 * different est_idy can target the same x_NLM/weight_NLM element through the
 * wrapped ref indices; the `+=` updates are not atomic — potential data race,
 * confirm whether this is intended/benign for this data.
 */
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
    double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
    // ri = flat index of the central (zero-offset) neighbor position
    int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
    int est_idy;
    #pragma omp parallel for private (est_idy)
    for (est_idy = 0; est_idy < N_HR; est_idy++)
    for (int est_idz = 0; est_idz < N_HR; est_idz++)
    for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
    {
        int ref_idy, ref_idz, moving_idy, moving_idz;
        double du;
        double d = 0.0;
        long int grid_rid, grid_nid;
        // normalized SSD between the reference patch (center neighbor ri)
        // and the candidate patch at neighbor position ni
        for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
        {
            grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                     + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
            grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                     + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
            ref_idy = gridy[grid_rid];
            moving_idy = gridy[grid_nid];
            ref_idz = gridz[grid_rid];
            moving_idz = gridz[grid_nid];
            //compute distance btw reference patch and fusion patch
            du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
            d = d + norm_fact*du*du;
        }
        double w = exp(-d/(2.0*TAU*TAU));
        // accumulate the weighted fusion values over the accumulation window
        for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
        {
            int ai = accids[k];
            grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                     + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
            grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
                     + est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
            ref_idy = gridy[grid_rid];
            moving_idy = gridy[grid_nid];
            ref_idz = gridz[grid_rid];
            moving_idz = gridz[grid_nid];
            x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz];
            weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w;
        }
        //printf("\n w=%f\n ",w);
    }
}
/* Forward sweep: reconstruct planes t_first+1 .. t_bound1, each from its
 * already-reconstructed left neighbour. */
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
    for (int t = t_first + 1; t <= t_bound1; t++)
    {
        double ref_lf[N_HR * N_HR];  /* low-frequency reference plane    */
        double ref_hf[N_HR * N_HR];  /* high-frequency estimate (output) */
        double mov_lf[N_HR * N_HR];  /* low-frequency moving plane       */
        double mov_hf[N_HR * N_HR];  /* high-frequency moving plane      */
        double wsum[N_HR * N_HR];    /* accumulated NL-means weights     */
        int ref_start = t_offset + t * N_HR * N_HR;
        int mov_start = t_offset + (t - 1) * N_HR * N_HR;
        get_onesnap(Xlf, ref_lf, ref_start, ref_start + N_HR * N_HR - 1);
        get_onesnap(Xlf, mov_lf, mov_start, mov_start + N_HR * N_HR - 1);
        get_onesnap(Xrec, mov_hf, mov_start, mov_start + N_HR * N_HR - 1);
        /* zero the accumulators, propagate, normalize, store */
        initialize(N_HR * N_HR, ref_hf, 0.0);
        initialize(N_HR * N_HR, wsum, 0.0);
        NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
        norm_by_weight(N_HR * N_HR, ref_hf, wsum);
        put_onesnap(Xrec, ref_hf, ref_start, ref_start + N_HR * N_HR - 1);
    }
}
/* Backward sweep: reconstruct planes t_last-1 down to t_bound2, each from
 * its already-reconstructed right neighbour. */
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
    for (int t = t_last - 1; t >= t_bound2; --t)
    {
        double ref_lf[N_HR * N_HR];  /* low-frequency reference plane    */
        double ref_hf[N_HR * N_HR];  /* high-frequency estimate (output) */
        double mov_lf[N_HR * N_HR];  /* low-frequency moving plane       */
        double mov_hf[N_HR * N_HR];  /* high-frequency moving plane      */
        double wsum[N_HR * N_HR];    /* accumulated NL-means weights     */
        int ref_start = t_offset + t * N_HR * N_HR;
        int mov_start = t_offset + (t + 1) * N_HR * N_HR;
        get_onesnap(Xlf, ref_lf, ref_start, ref_start + N_HR * N_HR - 1);
        get_onesnap(Xlf, mov_lf, mov_start, mov_start + N_HR * N_HR - 1);
        get_onesnap(Xrec, mov_hf, mov_start, mov_start + N_HR * N_HR - 1);
        /* zero the accumulators, propagate, normalize, store */
        initialize(N_HR * N_HR, ref_hf, 0.0);
        initialize(N_HR * N_HR, wsum, 0.0);
        NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
        norm_by_weight(N_HR * N_HR, ref_hf, wsum);
        put_onesnap(Xrec, ref_hf, ref_start, ref_start + N_HR * N_HR - 1);
    }
}
/* Reconstruct the single plane t_mid from BOTH of its temporal neighbours
 * (t_mid-1 first, then t_mid+1), accumulating into one weight buffer before
 * normalizing. */
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
    double ref_lf[N_HR * N_HR], ref_hf[N_HR * N_HR], mov_lf[N_HR * N_HR], mov_hf[N_HR * N_HR], wsum[N_HR * N_HR];
    int mid_start = t_offset + t_mid * N_HR * N_HR;
    initialize(N_HR * N_HR, ref_hf, 0.0);
    initialize(N_HR * N_HR, wsum, 0.0);
    get_onesnap(Xlf, ref_lf, mid_start, mid_start + N_HR * N_HR - 1);
    /* step = -1: previous plane; step = +1: following plane */
    for (int step = -1; step <= 1; step += 2)
    {
        int nb_start = t_offset + (t_mid + step) * N_HR * N_HR;
        get_onesnap(Xlf, mov_lf, nb_start, nb_start + N_HR * N_HR - 1);
        get_onesnap(Xrec, mov_hf, nb_start, nb_start + N_HR * N_HR - 1);
        NLmean(ref_hf, wsum, ref_lf, mov_lf, mov_hf, gridy, gridz, accids);
    }
    /* Normalize the two-neighbour accumulation and write the plane back */
    norm_by_weight(N_HR * N_HR, ref_hf, wsum);
    put_onesnap(Xrec, ref_hf, mid_start, mid_start + N_HR * N_HR - 1);
}
/* Reconstruct the interior planes of one block by sweeping from both PIV
 * bounds toward the center.  At each step the pair (t1 from the left, t2
 * from the right) is estimated, and each target plane receives NL-means
 * contributions from BOTH already-reconstructed outer neighbours
 * (t1 - 1 and t2 + 1).  For even SCALE_FACTOR_TIME a single center plane
 * remains and is handled separately at the end. */
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
    double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
    double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
    /* tc = half the block length, rounded UP when SCALE_FACTOR_TIME is odd,
     * so the td-loop below stops before the two sweeps would cross */
    int tc = (int)SCALE_FACTOR_TIME/2;
    if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
    for (int td = 1; td < tc; td++)
    {
        int t1 = t_first + td; // bound on left side
        int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
        // Initialize with zeros
        initialize(N_HR * N_HR, xref1_hf, 0.0);
        initialize(N_HR * N_HR, w1, 0.0);
        initialize(N_HR * N_HR, xref2_hf, 0.0);
        initialize(N_HR * N_HR, w2, 0.0);
        get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
        get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
        //Propagate from left bound (plane t1 - 1, reconstructed on a previous step)
        get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        //Propagate from right bound (plane t2 + 1, reconstructed on a previous step)
        get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        // Normalize and put back
        norm_by_weight(N_HR*N_HR, xref1_hf, w1);
        put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
        norm_by_weight(N_HR*N_HR, xref2_hf, w2);
        put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
    }
    // Last plane in the center: only exists when SCALE_FACTOR_TIME is even;
    // it takes contributions from both of its direct neighbours.
    if (SCALE_FACTOR_TIME % 2 == 0)
    {
        initialize(N_HR * N_HR, xref1_hf, 0.0);
        initialize(N_HR * N_HR, w1, 0.0);
        get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
        get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
        get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
        NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
        norm_by_weight(N_HR*N_HR, xref1_hf, w1);
        put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
    }
}
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
/* Entry point: reads low/high-frequency snapshot stacks from NetCDF,
 * reconstructs every intermediate plane block-by-block via NL-means
 * temporal propagation, and writes the result back to NetCDF.
 * Returns 0 on success, EXIT_FAILURE on allocation failure. */
int main()
{
    /* Create the file to save results */
    char *varnames[NUM_VARS] = {"x_rec_all"};
    create_netcdf(FILENAME_WR, NUM_VARS, varnames);
    /* Allocate memory; check every allocation before use */
    double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
    double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
    double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
    /* read all snapshots */
    size_t start_ids[4] = {0, 0, 0, 0};
    size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
    read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
    read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);
    double time_all_start = omp_get_wtime();
    double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));
    /* grid_size: force long arithmetic so the product cannot overflow int */
    long int grid_size = (long int) N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
    int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
    int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
    int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));
    if (!x_fusion_lf_all || !x_fusion_hf_all || !x_rec_all || !x_current_hf
        || !gridpatches_y || !gridpatches_z || !acc_ids)
    {
        fprintf(stderr, "allocation failure\n");
        return EXIT_FAILURE;
    }
    generate_grids(gridpatches_y, gridpatches_z, acc_ids);
    for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
    {
        int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;
        /* put first PIV plane directly (it is measured, not reconstructed) */
        get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
        put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
        for(int block_id = 0; block_id < NUM_BLOCKS; block_id++)
        {
            double time_start = omp_get_wtime();
            int t_first = SCALE_FACTOR_TIME*block_id;
            int t_last = SCALE_FACTOR_TIME*(block_id+1);
            /* Put last PIV plane of the block */
            get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
            put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
            if (SCALE_FACTOR_TIME % 2)
            {
                /* odd block length: two symmetric sweeps meet in the middle */
                int t_bound1 = t_first + (int)SCALE_FACTOR_TIME/2;
                int t_bound2 = t_bound1 + 1;
                propag_forward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_bound1, t_offset);
                propag_backward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_last, t_bound2, t_offset);
            }
            else
            {
                /* even block length: the center plane takes both neighbours */
                int t_mid = t_first + (int)SCALE_FACTOR_TIME/2;
                int t_bound1 = t_mid - 1;
                int t_bound2 = t_mid + 1;
                propag_forward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_bound1, t_offset);
                propag_backward(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_last, t_bound2, t_offset);
                propag_2planes(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_mid, t_offset);
            }
            /* was inside the else branch (so never printed for odd
             * SCALE_FACTOR_TIME) and hardcoded the totals 23/37 */
            printf("\n Estimated block %i (total %i) in 3D snapshot %i (total %i) in %f seconds \n",
                   block_id, (int)NUM_BLOCKS, snap3d_id, (int)NUM_3DSNAPS, (double)omp_get_wtime() - time_start);
        }
    }
    /* Write to file */
    write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);
    /* free memory */
    free(x_current_hf);
    free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
    free(gridpatches_y); free(gridpatches_z); free(acc_ids);
    printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);
    return 0;
}
|
test3.c | int main() {
    int x;
    /* Test fixture for OpenMP barrier/control-flow analysis: every path
     * through the parallel region reaches exactly one #pragma omp barrier,
     * so all threads stay in step whichever branch they take.  The bare
     * integer expression statements (0; 2; ...) mark numbered program
     * points and compute nothing. */
    #pragma omp parallel
    {
        0;
        if (1) {
            2;
            if (3) {
                x = 0;   /* write to shared x before the barrier */
                4;
                #pragma omp barrier
                5;
            } else {
                6;
                #pragma omp barrier
                7;
            }
            8;
        } else {
            9;
            if (10) {
                11;
                #pragma omp barrier
                12;
            } else {
                13;
                #pragma omp barrier
                x;   /* read of shared x after the barrier */
                14;
            }
            15;
        }
        16;
    }
    x = 10;  /* write outside the parallel region */
    17;
}
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read every line of the given file into a newly allocated list.
 * Aborts via file_error() if the file cannot be opened. */
list *get_paths(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    char *path;
    while((path = fgetl(file)) != 0){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Sample n paths (with replacement) from the first m entries of paths.
 * rand() is called under the mutex so concurrent loader threads draw from
 * a consistent PRNG stream.  Caller frees the returned array (not the
 * strings, which are shared with paths). */
char **get_random_paths(char **paths, int n, int m)
{
    char **random_paths = calloc(n, sizeof(char*));
    pthread_mutex_lock(&mutex);
    for(int i = 0; i < n; ++i){
        random_paths[i] = paths[rand()%m];
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
/* Build a new array of n paths where every occurrence of `find` is replaced
 * by `replace`.  Each returned string is freshly allocated; caller owns the
 * array and its strings. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **replace_paths = calloc(n, sizeof(char*));
    for(int i = 0; i < n; ++i){
        char buffer[4096];
        find_replace(paths[i], find, replace, buffer);
        replace_paths[i] = copy_string(buffer);
    }
    return replace_paths;
}
/* Load n images at w x h, convert each to grayscale, and pack the pixel
 * data as rows of a matrix.  Row storage is owned by the matrix. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(int i = 0; i < n; ++i){
        image color = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);
        X.vals[i] = gray.data;
        X.cols = gray.h*gray.w*gray.c;
    }
    return X;
}
/* Load n color images at w x h and pack the pixel data as matrix rows. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(int i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Load n images and apply train-time augmentation: either a deterministic
 * center crop (center != 0) or a random rotate/scale/aspect crop, plus a
 * random horizontal flip and random HSV distortion.  The augmented pixel
 * buffers become the rows of the returned matrix. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0); /* 0,0: keep native size */
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2; /* 50% chance of horizontal flip */
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);
        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        //grayscale_image_3c(crop);
        free_image(im);
        X.vals[i] = crop.data; /* matrix takes ownership of the crop buffer */
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/* Shuffle the n boxes in place by swapping each element with a randomly
 * chosen one (same scheme as the original; not a uniform Fisher-Yates). */
void randomize_boxes(box_label *b, int n)
{
    for(int i = 0; i < n; ++i){
        int j = rand()%n;
        box_label tmp = b[i];
        b[i] = b[j];
        b[j] = tmp;
    }
}
/* Remap box coordinates after a crop/resize described by scale (sx, sy) and
 * shift (dx, dy), optionally mirroring horizontally, then clamp everything
 * to [0,1] and rebuild (x, y, w, h) from the clamped edges.  The statement
 * order matters: edges are transformed first, centers/sizes derived last. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            /* (0,0) marks an invalid/absent box: push it far out of range
             * so it can never match a grid cell */
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;
        if(flip){
            /* mirror: left/right swap around x = 1 */
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }
        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);
        /* derive center/size from the clamped edges */
        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);
        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}
/* Fill a flat truth vector for SWAG-style detection: up to 90 boxes, each
 * stored as [x, y, w, h, one-hot class...] in a (4+classes)-stride layout.
 * The label file path is derived from the image path by substitution. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count && i < 90; ++i) { /* 90 = fixed truth capacity */
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        /* NOTE(review): threshold is 0.0 here while the sibling fill_truth_*
         * functions use .005/.001 — possibly intentional, verify */
        if (w < .0 || h < .0) continue;
        int index = (4+classes) * i;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}
/* Fill region-style (YOLOv1/v2 grid) truth: the image is divided into a
 * num_boxes x num_boxes grid; each box is assigned to the cell containing
 * its center and stored as [objectness, one-hot class..., x, y, w, h] with
 * x,y relative to the cell.  First box to claim a cell wins. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if (w < .005 || h < .005) continue; /* drop degenerate boxes */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);
        /* make x,y relative to the grid cell */
        x = x*num_boxes - col;
        y = y*num_boxes - row;
        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue; /* cell already claimed by an earlier box */
        truth[index++] = 1; /* objectness */
        if (id < classes) truth[index+id] = 1;
        index += classes;
        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}
/* Decode a run-length encoding into the binary mask im: runs alternate
 * between the values 0 and 1 starting at 0, and any pixels past the last
 * run are filled with the value current at that point. */
void load_rle(image im, int *rle, int n)
{
    int pos = 0;
    int val = 0;
    for(int i = 0; i < n; ++i){
        for(int j = 0; j < rle[i]; ++j){
            im.data[pos++] = val;
        }
        val = 1 - val;
    }
    while(pos < im.h*im.w*im.c){
        im.data[pos++] = val;
    }
}
/* Bitwise-OR the single-channel mask src into channel c of dest:
 * every nonzero source pixel sets the corresponding dest pixel to 1. */
void or_image(image src, image dest, int c)
{
    int off = dest.w*dest.h*c;
    for(int i = 0; i < src.w*src.h; ++i){
        if(src.data[i]) dest.data[off + i] = 1;
    }
}
/* Make channels mutually exclusive: a pixel set in channel k clears the
 * same pixel in every later channel, so earlier channels take priority. */
void exclusive_image(image src)
{
    int s = src.w*src.h;
    for(int k = 0; k < src.c-1; ++k){
        for(int i = 0; i < s; ++i){
            if(!src.data[k*s + i]) continue;
            for(int j = k+1; j < src.c; ++j){
                src.data[j*s + i] = 0;
            }
        }
    }
}
/* Tight bounding box (x, y, w, h in pixels) of the nonzero pixels in the
 * first channel of im.  NOTE(review): when no pixel is set, the extremes
 * never update and the returned width/height are non-positive — callers
 * appear to test b.w > 0 for that case; confirm before relying on it. */
box bound_image(image im)
{
    int lo_x = im.w, lo_y = im.h;
    int hi_x = 0, hi_y = 0;
    for(int y = 0; y < im.h; ++y){
        for(int x = 0; x < im.w; ++x){
            if(!im.data[y*im.w + x]) continue;
            if(x < lo_x) lo_x = x;
            if(y < lo_y) lo_y = y;
            if(x > hi_x) hi_x = x;
            if(y > hi_y) hi_y = y;
        }
    }
    box b = {lo_x, lo_y, hi_x - lo_x + 1, hi_y - lo_y + 1};
    return b;
}
/* Fill instance-segmentation truth: for up to num_boxes RLE-encoded masks
 * read from the derived label file, apply the same augmentation (aug, flip)
 * as the input image, downsample each mask to mw x mh, and store rows of
 * [class id, mask pixels...] with stride mw*mh+1.  A trailing id of -1
 * marks the end of the instance list. */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    int j;
    image part = make_image(w, h, 1); /* reused scratch mask, full resolution */
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        /* apply the exact augmentation used on the image so masks line up */
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        image mask = resize_image(sized, mw, mh);
        truth[i*(mw*mh+1)] = id;
        for(j = 0; j < mw*mh; ++j){
            truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
        }
        ++i;
        free_image(mask);
        free_image(sized);
        free(rle);
    }
    if(i < num_boxes) truth[i*(mw*mh+1)] = -1; /* sentinel: no more instances */
    fclose(file);
    free_image(part);
}
/* Fill mask-RCNN-style truth: for each RLE mask in the derived label file,
 * apply the image's augmentation, compute the instance's bounding box, crop
 * the mask to that box, resize it to mw x mh, and store rows of
 * [cx, cy, w, h (normalized), mask pixels..., class id] with stride
 * 4 + mw*mh + 1.  Instances that vanish after augmentation (empty bound)
 * are skipped. */
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1); /* reused scratch mask */
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        box b = bound_image(sized);
        if(b.w > 0){ /* instance still visible after augmentation */
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}
/* Fill flat detection truth: up to num_boxes boxes stored consecutively as
 * [x, y, w, h, id] with stride 5.  Boxes that become degenerate after
 * augmentation (< .001 wide or tall) are dropped and later boxes are
 * compacted forward (the `sub` counter). */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count); /* shuffle so truncation keeps a random subset */
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;
    int sub = 0; /* number of boxes dropped so far; keeps truth compact */
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;
        if ((w < .001 || h < .001)) {
            ++sub;
            continue;
        }
        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}
#define NUMCHARS 37
/* Decode n per-character prediction vectors (NUMCHARS scores each) into
 * their argmax characters and print them as a single line. */
void print_letters(float *pred, int n)
{
    for(int i = 0; i < n; ++i){
        int best = max_index(pred + i*NUMCHARS, NUMCHARS);
        putchar(int_to_alphanum(best));
    }
    putchar('\n');
}
/* One-hot encode the captcha text embedded in the file name: characters of
 * the basename (up to the first '.') map to columns 0..35 of each
 * NUMCHARS-wide row; remaining rows get the "blank" column NUMCHARS-1. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    /* original did `++begin` unconditionally: UB when the path contains
     * no '/'.  Fall back to the whole string in that case. */
    begin = begin ? begin + 1 : path;
    int len = (int) strlen(begin); /* hoisted out of the loop condition */
    int i;
    for(i = 0; i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1; /* pad with the blank symbol */
    }
}
/* Build a captcha training batch: n images (randomly sampled from m paths
 * when m is nonzero) with k-character one-hot labels. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    for(int i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths); /* free the sampled array, not the shared strings */
    return d;
}
/* Build an autoencoder batch for captchas: targets alias the inputs
 * (d.y = d.X shares storage — freeing both would double-free).
 * NOTE(review): X.cols is forced to 17100; presumably the encoder's fixed
 * input size — confirm against the network config. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}
/* One-hot encode labels by substring match: truth[i] becomes 1 for every
 * label that occurs inside the image path, 0 elsewhere. */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int hits = 0;
    for(int i = 0; i < k; ++i){
        if(strstr(path, labels[i]) != 0){
            truth[i] = 1;
            ++hits;
        }
    }
    //if(hits != 1 && (k != 1 || hits != 0)) printf("Too many or too few labels: %d, %s\n", hits, path);
}
/* Propagate a one-hot truth vector up a class hierarchy: every ancestor of
 * a set class is also set, and sibling groups where nothing is set are
 * filled with SECRET_NUM (a "don't care" marker the loss ignores). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            /* walk up to the root, marking every ancestor */
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0; /* running start index of the current sibling group */
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1; /* assume the group is untouched */
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            /* no class in this group is active: mark all as don't-care */
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}
/* Load k regression targets per image from the label file derived from each
 * image path (extension variants of jpg/png/bmp/tif are all mapped to .txt).
 * Aborts via file_error() on a missing label file — the original passed the
 * NULL FILE* straight into fscanf (undefined behavior).  Values a short or
 * malformed file fails to provide are zero-filled. */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".BMP", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        find_replace(labelpath, ".JPG", ".txt", labelpath);
        find_replace(labelpath, ".JPeG", ".txt", labelpath);
        find_replace(labelpath, ".Jpeg", ".txt", labelpath);
        find_replace(labelpath, ".PNG", ".txt", labelpath);
        find_replace(labelpath, ".TIF", ".txt", labelpath);
        find_replace(labelpath, ".bmp", ".txt", labelpath);
        find_replace(labelpath, ".jpeg", ".txt", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);
        find_replace(labelpath, ".tif", ".txt", labelpath);
        FILE *file = fopen(labelpath, "r");
        if(!file) file_error(labelpath);
        for(j = 0; j < k; ++j){
            if(fscanf(file, "%f", &(y.vals[i][j])) != 1){
                y.vals[i][j] = 0;
            }
        }
        fclose(file);
    }
    return y;
}
/* Build an n x k label matrix by substring-matching each path against the
 * label names; optionally expand each row up a class hierarchy.  A NULL
 * labels array yields an all-zero matrix. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    if(labels){
        for(int i = 0; i < n; ++i){
            fill_truth(paths[i], labels, k, y.vals[i]);
            if(hierarchy) fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}
/* Build an n x k multi-hot tag matrix: each image's label file lists integer
 * tag ids; ids >= k are ignored.  Rows for images without a label file stay
 * all-zero. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    for(int i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "images", "labels", label);
        find_replace(label, ".jpg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file) continue; /* missing tag file: leave the row zeroed */
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k) y.vals[i][tag] = 1;
        }
        fclose(file);
    }
    return y;
}
/* Read one label per line from the file into a NULL-tolerant string array;
 * the list shell is freed but the strings live on in the returned array. */
char **get_labels(char *filename)
{
    list *lines = get_paths(filename);
    char **labels = (char **)list_to_array(lines);
    free_list(lines);
    return labels;
}
/* Release a data batch.  Shallow batches share their row buffers with
 * another owner, so only the row-pointer arrays are freed; deep batches
 * free rows and all. */
void free_data(data d)
{
    if(d.shallow){
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Build a w x h x classes segmentation mask for the image at `path` by
 * decoding every "id rle" line of the derived mask file and OR-ing each
 * instance into its class channel. */
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    image mask = make_image(w, h, classes);
    image part = make_image(w, h, 1); /* scratch buffer, reused per instance */
    char buff[32788];
    int id;
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        free(rle);
    }
    fclose(file);
    free_image(part);
    return mask;
}
/* Like get_segmentation_image, but with one extra "background" channel at
 * index `classes` that starts all-ones and is cleared wherever any object
 * mask covers a pixel. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int bg = w*h*classes; /* offset of the background channel */
    for(int i = 0; i < w*h; ++i) mask.data[bg + i] = 1;
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    image part = make_image(w, h, 1); /* scratch buffer, reused per instance */
    char buff[32788];
    int id;
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        for(int i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[bg + i] = 0;
        }
        free(rle);
    }
    fclose(file);
    free_image(part);
    return mask;
}
/* Build a semantic-segmentation batch: n randomly sampled images with
 * random rotate/scale/crop, flip, and HSV distortion; the matching class
 * masks get the SAME geometric augmentation (scaled by 1/div for the
 * network's output stride) so image and target stay aligned. */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y.rows = n;
    d.y.cols = h*w*classes/div/div; /* mask resolution is downscaled by div */
    d.y.vals = calloc(d.X.rows, sizeof(float*));
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        /* identical geometry, divided by the output stride */
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;
        free_image(orig);
        free_image(mask);
        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/* Build an instance-segmentation batch: n randomly sampled, randomly
 * augmented images with per-instance truth rows produced by
 * fill_truth_iseg (masks downscaled by div, same geometry as the image). */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes); /* boxes rows of [id, mask...] */
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");
        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);
        free_image(orig);
        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/* Build a mask-prediction batch: n randomly sampled, randomly augmented
 * images with per-instance truth rows of (coords + 1) values produced by
 * fill_truth_mask (instance masks resized to a fixed 14 x 14 grid). */
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y = make_matrix(n, (coords+1)*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");
        fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
        free_image(orig);
        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}
/* Build a region-detection (grid) batch: n randomly sampled images, each
 * randomly jitter-cropped, resized to w x h, flipped and color-distorted;
 * the crop's shift/scale is forwarded to fill_truth_region so box
 * coordinates are remapped into the augmented frame. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = size*size*(5+classes); /* one (5+classes) slot per grid cell */
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        int oh = orig.h;
        int ow = orig.w;
        /* random crop of up to +/- jitter of each edge */
        int dw = (ow*jitter);
        int dh = (oh*jitter);
        int pleft = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop = rand_uniform(-dh, dh);
        int pbot = rand_uniform(-dh, dh);
        int swidth = ow - pleft - pright;
        int sheight = oh - ptop - pbot;
        float sx = (float)swidth / ow;
        float sy = (float)sheight / oh;
        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
        /* normalized shift of the crop origin, in crop-relative units */
        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;
        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}
/* Load n image pairs for comparison training.  Each row of X holds both
 * images back-to-back (6 channels total); y holds, per class, the best IoU
 * seen for each of the two images, post-processed into {0,1,SECRET_NUM}.
 * Fix: the two fopen() results were used unchecked, so a missing label
 * file caused a NULL-pointer dereference inside fscanf; failures are now
 * reported through file_error, matching the rest of the file. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2], w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2], "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);
        /* keep the maximum IoU per class for the first image */
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        /* binarize: a clear winner gets 1/0; ambiguous pairs get SECRET_NUM */
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load ONE randomly chosen image for SWAG training at its native size.
 * Applies a random jittered crop (resized back to the original w x h) and
 * an optional flip, then fills a (4+classes)*90 truth row.  Caller owns d. */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = rand()%n;
    char *random_path = paths[index];
    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    d.X.rows = 1;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    int k = (4+classes)*90;   /* up to 90 boxes, 4 coords + class scores each */
    d.y = make_matrix(1, k);
    /* jitter each crop edge independently by up to +/- jitter * dimension */
    int dw = w*jitter;
    int dh = h*jitter;
    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);
    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;
    float sx = (float)swidth / w;
    float sy = (float)sheight / h;
    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
    /* normalized crop offset, used to remap box coordinates */
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;
    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;   /* d takes ownership of the pixels */
    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}
/* Load n detection-training examples (DETECTION_DATA).  Each image is
 * placed onto a gray w x h canvas at a randomly jittered aspect ratio and
 * position, distorted in HSV, optionally flipped, and paired with up to
 * `boxes` ground-truth boxes (5 values each).  Caller owns d. */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);   /* neutral gray background for the letterbox */
        /* jitter the aspect ratio around the original image's */
        float dw = jitter * orig.w;
        float dh = jitter * orig.h;
        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        //float scale = rand_uniform(.25, 2);
        float scale = 1;
        float nw, nh;
        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }
        /* random placement of the scaled image within the canvas */
        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);
        place_image(orig, nw, nh, dx, dy, sized);
        random_distort_image(sized, hue, saturation, exposure);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;   /* d takes ownership of the pixels */
        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
        free_image(orig);
    }
    free(random_paths);
    return d;
}
/* Thread entry point for one loader: copies the heap-allocated load_args,
 * dispatches to the loader matching a.type, frees the argument struct and
 * returns.  Zero-valued distortion parameters are normalized to 1 (identity). */
void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;
    switch(a.type){
        case OLD_CLASSIFICATION_DATA:
            *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
            break;
        case REGRESSION_DATA:
            *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case CLASSIFICATION_DATA:
            *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
            break;
        case SUPER_DATA:
            *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
            break;
        case WRITING_DATA:
            *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
            break;
        case ISEG_DATA:
            *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case INSTANCE_DATA:
            *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case SEGMENTATION_DATA:
            *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
            break;
        case REGION_DATA:
            *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case DETECTION_DATA:
            *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case SWAG_DATA:
            *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
            break;
        case COMPARE_DATA:
            *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
            break;
        case IMAGE_DATA:
            *(a.im) = load_image_color(a.path, 0, 0);
            *(a.resized) = resize_image(*(a.im), a.w, a.h);
            break;
        case LETTERBOX_DATA:
            *(a.im) = load_image_color(a.path, 0, 0);
            *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
            break;
        case TAG_DATA:
            *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        default:
            break;
    }
    free(ptr);
    return 0;
}
/* Start one loader thread running load_thread on a heap copy of args.
 * The thread frees the copy; the caller must pthread_join the handle. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}
/* Fan out the requested args.n examples across args.threads loader threads,
 * join them, concatenate the per-thread batches into *args.d, then release
 * the per-thread buffers.  Frees the heap-allocated args struct. */
void *load_threads(void *ptr)
{
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data *buffers = calloc(args.threads, sizeof(data));
    pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* prefix-sum split: per-thread counts differ by at most one
         * and always add up to total */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;   /* the concatenated result owns the rows now */
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;   /* so free_data releases only pointer arrays */
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
/* Synchronous variant of load_data: runs load_thread on the calling
 * thread.  load_thread frees the heap copy of args. */
void load_data_blocking(load_args args)
{
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    load_thread(ptr);
}
/* Start the multi-threaded loader (load_threads) in the background.
 * Caller must pthread_join the returned handle before reading *args.d. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}
/* Load image / label-image pairs: X from the given paths at w x h, y from
 * the matching "-label.png" files rendered grayscale at out_w x out_h. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}
/* Plain classification loader: images resized to w x h, one-hot labels
 * (k classes) derived from the paths; no augmentation, no label hierarchy. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
/* Super-resolution pairs: y is a random (w*scale) x (h*scale) crop of the
 * original image, X is that crop downsampled to w x h.  Both buffers are
 * owned by d (crop.data and resize.data are stored, not copied). */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    int i;
    d.X.rows = n;
    d.X.vals = calloc(n, sizeof(float*));
    d.X.cols = w*h*3;
    d.y.rows = n;
    d.y.vals = calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
/* Regression loader: augmented images in X, k regression targets per image
 * in y, parsed from the per-image label files. */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(paths, n, k);
    if(m) free(paths);
    return d;
}
/* Build a shallow data whose row i is row i of orig[inds[i]] — per-row
 * selection among several parallel datasets (e.g. the output of tile_data).
 * All orig[] entries are assumed to have identical shapes. */
data select_data(data *orig, int *inds)
{
    data d = {0};
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;
    d.X.rows = orig[0].X.rows;
    /* NOTE(review): y.rows is taken from X.rows; presumably the two are
     * always equal here — confirm, or use orig[0].y.rows for clarity. */
    d.y.rows = orig[0].X.rows;
    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;
    d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for(i = 0; i < d.X.rows; ++i){
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}
/* Split each image of orig into divs x divs overlapping tiles; returns an
 * array of divs*divs data structs, one per tile position, each deep-copying
 * the labels and cropping every row's pixels.
 * Fixes: (1) `j` was declared at function scope and therefore SHARED between
 * the threads of the outer `#pragma omp parallel for` — a data race that
 * could corrupt indices; it is now loop-local.  (2) The inner
 * `#pragma omp parallel for`, nested inside an already-parallel region and
 * iterating over that shared j, is removed.  (3) `data d` was only partially
 * initialized, leaving fields such as num_boxes/boxes indeterminate; it is
 * now zero-initialized. */
data *tile_data(data orig, int divs, int size)
{
    data *ds = calloc(divs*divs, sizeof(data));
    int i;
    #pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        data d = {0};
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = calloc(d.X.rows, sizeof(float*));
        d.y = copy_matrix(orig.y);
        int j;   /* loop-local: private to each outer-loop thread */
        for(j = 0; j < orig.X.rows; ++j){
            /* tile origin, centered so neighbouring tiles overlap when
             * size > 1 */
            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}
/* Deep-resize every image row of orig to w x h; labels are copied
 * unchanged.  Rows are processed in parallel (each iteration writes only
 * its own d.X.vals[i]). */
data resize_data(data orig, int w, int h)
{
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.y = copy_matrix(orig.y);
    #pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}
/* Classification loader with augmentation: random-crop/rotate/distort each
 * image to size x size, derive (optionally hierarchical) one-hot labels
 * from the paths. */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w=size;
    d.h=size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if(m) free(paths);
    return d;
}
/* Tag loader: augmented images in X, multi-label tag vectors (k tags)
 * parsed from per-image tag files in y. */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}
/* Stack the rows of m2 beneath the rows of m1.  The result is shallow:
 * row pointers are shared with the inputs, only the pointer array is new. */
matrix concat_matrix(matrix m1, matrix m2)
{
    matrix out;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = calloc(out.rows, sizeof(float*));
    int r;
    for(r = 0; r < m1.rows; ++r){
        out.vals[r] = m1.vals[r];
    }
    for(r = 0; r < m2.rows; ++r){
        out.vals[m1.rows + r] = m2.vals[r];
    }
    return out;
}
/* Shallow concatenation of two data structs: the result shares row
 * pointers with both inputs (shallow == 1) and inherits d1's dimensions. */
data concat_data(data d1, data d2)
{
    data joined = {0};
    joined.shallow = 1;
    joined.w = d1.w;
    joined.h = d1.h;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    return joined;
}
/* Concatenate n data structs into one, building the result incrementally.
 * Each intermediate result from concat_data is shallow, so free_data on it
 * releases only the row-pointer arrays, never the shared rows. */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data new = concat_data(d[i], out);
        free_data(out);   /* out is zeroed or shallow, so rows survive */
        out = new;
    }
    return out;
}
/* Load a CSV as feature matrix X, popping column `target` out and one-hot
 * encoding it (k classes) into y.  Caller owns the result. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;   /* y takes ownership of the one-hot rows */
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}
/* Load one CIFAR-10 binary batch file (10000 records: 1 label byte followed
 * by 3072 pixel bytes) into X (pixels scaled to [0,1]) and one-hot y.
 * Fix: the fread return value was ignored, so a truncated or corrupt file
 * silently produced garbage rows; a short read now aborts via file_error. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
/* Copy n uniformly sampled rows (with replacement) of d into the flat
 * output buffers X and y. */
void get_random_batch(data d, int n, float *X, float *y)
{
    int row;
    for(row = 0; row < n; ++row){
        int pick = rand() % d.X.rows;
        memcpy(X + row*d.X.cols, d.X.vals[pick], d.X.cols*sizeof(float));
        memcpy(y + row*d.y.cols, d.y.vals[pick], d.y.cols*sizeof(float));
    }
}
/* Copy n consecutive rows of d, starting at offset, into the flat output
 * buffers X and y.  y may be NULL to skip the labels. */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int row;
    for(row = 0; row < n; ++row){
        int src = offset + row;
        memcpy(X + row*d.X.cols, d.X.vals[src], d.X.cols*sizeof(float));
        if(y) memcpy(y + row*d.y.cols, d.y.vals[src], d.y.cols*sizeof(float));
    }
}
/* In-place label smoothing: y <- eps * (1/k) + (1 - eps) * y, with
 * eps = 0.1 and k = number of label columns. */
void smooth_data(data d)
{
    const float eps = .1;
    const float scale = 1. / d.y.cols;
    int r, c;
    for(r = 0; r < d.y.rows; ++r){
        float *row = d.y.vals[r];
        for(c = 0; c < d.y.cols; ++c){
            row[c] = eps * scale + (1-eps) * row[c];
        }
    }
}
/* Load all five CIFAR-10 training batches (50000 records) into X (pixels
 * scaled to [0,1], then label-smoothed one-hot y).
 * Fix: the fread return value was ignored, so a truncated batch file
 * silently produced garbage rows; short reads now abort via file_error,
 * consistent with load_cifar10_data. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
            int class = bytes[0];
            y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
/* Load Go training pairs from a text file.  Each record is a line
 * "row col" (the move) followed by a 361-character board line
 * ('1' = own stone, '2' = opponent, anything else = empty).  X gets the
 * board encoded as +1/-1/0; y is a one-hot of the move.  The matrices grow
 * by doubling while reading and are trimmed to the final count. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        /* NOTE(review): row/col come straight from the file; the index is
         * only valid for 0 <= row,col < 19, and malformed input would write
         * out of bounds.  Confirm the data source is trusted. */
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
/* In-place Fisher–Yates shuffle of the rows of X and y in lockstep.
 * Fix: the swap index was rand()%i, which can never pick position i itself
 * (Sattolo's algorithm — only cyclic permutations, a biased shuffle);
 * rand()%(i+1) makes every permutation reachable. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = rand()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
/* Multiply every element of every input row of d by s. */
void scale_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        scale_array(d.X.vals[row], d.X.cols, s);
    }
}
/* Add s to every element of every input row of d. */
void translate_data_rows(data d, float s)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        translate_array(d.X.vals[row], d.X.cols, s);
    }
}
/* Deep copy of d: the X and y matrices are duplicated, while the box
 * metadata pointer is shared with the source. */
data copy_data(data d)
{
    data dup = {0};
    dup.shallow = 0;
    dup.w = d.w;
    dup.h = d.h;
    dup.num_boxes = d.num_boxes;
    dup.boxes = d.boxes;
    dup.X = copy_matrix(d.X);
    dup.y = copy_matrix(d.y);
    return dup;
}
/* Normalize each input row of d in place. */
void normalize_data_rows(data d)
{
    int row;
    for(row = 0; row < d.X.rows; ++row){
        normalize_array(d.X.vals[row], d.X.cols);
    }
}
/* Shallow view of the part-th of total contiguous slices of d.  The
 * prefix-sum arithmetic makes slice sizes differ by at most one and cover
 * all rows exactly once across parts. */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}
/* Shallow random sample (with replacement) of num rows from d: the row
 * pointers are shared, only the pointer arrays are allocated. */
data get_random_data(data d, int num)
{
    data sample = {0};
    sample.shallow = 1;
    sample.X.rows = sample.y.rows = num;
    sample.X.cols = d.X.cols;
    sample.y.cols = d.y.cols;
    sample.X.vals = calloc(num, sizeof(float *));
    sample.y.vals = calloc(num, sizeof(float *));
    int i;
    for(i = 0; i < num; ++i){
        int pick = rand() % d.X.rows;
        sample.X.vals[i] = d.X.vals[pick];
        sample.y.vals[i] = d.y.vals[pick];
    }
    return sample;
}
/* Split d into two shallow views: split[0] = train (rows outside
 * [start,end)), split[1] = test (rows inside), where the test slice is the
 * part-th of total equal chunks.  Caller frees the returned array and the
 * shallow data structs.
 * Fix: train and test were declared uninitialized, so every field not
 * explicitly assigned (w, h, num_boxes, boxes, ...) carried indeterminate
 * stack garbage into the result; both are now zero-initialized. */
data *split_data(data d, int part, int total)
{
    data *split = calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train = {0};
    data test = {0};
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = calloc(train.X.rows, sizeof(float*));
    test.X.vals = calloc(test.X.rows, sizeof(float*));
    train.y.vals = calloc(train.y.rows, sizeof(float*));
    test.y.vals = calloc(test.y.rows, sizeof(float*));
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
jacobiinitialize-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// using of private() clause
#include <stdio.h>
#include <math.h>
#define MSIZE 200
int n=MSIZE, m=MSIZE;
double alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;
/* Set up grid spacing (dx, dy), the initial guess u = 0, and the RHS f for
 * the Jacobi solver.  The parallel loop is race-free: i, j, xx, yy are
 * declared private, and each (i,j) iteration writes distinct elements of
 * the global arrays u and f. */
void
initialize ()
{
    int i, j, xx, yy;
    dx = 2.0 / (n - 1); // -->dx@112:2
    dy = 2.0 / (m - 1); //-->dy@113:2
    /* Initialize initial condition and RHS */
#pragma omp parallel for private(i,j,xx,yy)
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
        {
            /* NOTE(review): the (int) casts truncate the coordinate, so
             * xx,yy take only a few integer values across the grid — kept
             * as-is to preserve the benchmark's original behavior. */
            xx = (int) (-1.0 + dx * (i - 1)); /* -1 < x < 1 */
            yy = (int) (-1.0 + dy * (j - 1)); /* -1 < y < 1 */
            u[i][j] = 0.0;
            f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
                - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
        }
}
/* Driver: fill u, f and the grid spacing via the parallel initializer. */
int main()
{
    initialize();
    return 0;
}
|
FloydsAlgorithm.c | /*
Algorithm 11 - Floyd's Algorithm
Implement All-Pairs Shortest Paths Problem using Floyd's algorithm. Parallelize this algorithm, implement it using OpenMP and determine the speed-up achieved.
*/
#include <stdio.h>
#include <time.h>
#include <omp.h>
int total_threads;
int minimum(int a, int b);
void floydsAlgorithm(int a[10][10], int n);
/* Read a 1-indexed adjacency matrix, run Floyd's algorithm, and report the
 * all-pairs shortest paths plus the elapsed CPU time.
 * Fixes: (1) the result printf used the literal format "d " instead of
 * "%d ", so the distances were never printed; (2) scanf results were
 * unchecked; (3) n was not validated against the fixed 10x10 array (the
 * 1-indexed loops require n <= 9), risking a stack buffer overflow. */
int main() {
    int a[10][10], n, i, j;
    double time_taken;
    clock_t begin_clock, end_clock;
    printf("Enter the number of nodes: ");
    if (scanf("%d", &n) != 1 || n < 1 || n > 9) {
        fprintf(stderr, "Invalid number of nodes (expected 1..9).\n");
        return 1;
    }
    printf("Enter the cost adjacency matrix:\n");
    for (i = 1; i <= n; i++)
        for (j = 1; j <= n; j++)
            if (scanf("%d", &a[i][j]) != 1) {
                fprintf(stderr, "Invalid matrix entry.\n");
                return 1;
            }
    begin_clock = clock();
    floydsAlgorithm(a, n);
    end_clock = clock();
    printf("All-Pairs Shortest Paths is as follows:\n");
    for (i = 1; i <= n; i++) {
        for (j = 1; j <= n; j++)
            printf("%d ", a[i][j]);
        printf("\n");
    }
    time_taken = (end_clock - begin_clock) / (double) CLOCKS_PER_SEC;
    printf("\nThe time taken to perform Floyd's Algorithm is: %f\n", time_taken);
    return 0;
}
/* Return the smaller of a and b. */
int minimum(int a, int b) {
    if (a < b) {
        return a;
    }
    return b;
}
/* Floyd–Warshall all-pairs shortest paths (1-indexed, n <= 9), parallelized
 * with OpenMP, updating the distance matrix in place.
 * Fixes vs. the original:
 *  - "#pragma omp sections" contained no "#pragma omp section" directives,
 *    so the whole triple loop ran on a single thread (no parallelism) while
 *    the loop counters i, j, k were shared across the team — a latent race;
 *  - the k loop must stay sequential (iteration k depends on k-1), so only
 *    the independent i loop is parallelized, with loop-local i and j;
 *  - a ~10^8-iteration empty delay loop did no useful work and only
 *    inflated the reported time; it has been removed (results unchanged). */
void floydsAlgorithm(int a[10][10], int n) {
    int k;
    for (k = 1; k <= n; k++) {
        /* rows are independent for a fixed k: row k itself is a fixed point
         * of this update, so concurrent reads of a[i][k] / a[k][j] are safe */
        #pragma omp parallel for
        for (int i = 1; i <= n; i++)
            for (int j = 1; j <= n; j++)
                a[i][j] = minimum(a[i][j], a[i][k] + a[k][j]);
    }
    /* report the team size once, and track the global maximum */
    #pragma omp parallel
    {
        if (omp_get_thread_num() == 0) {
            int no_of_threads = omp_get_num_threads();
            if (total_threads < no_of_threads)
                total_threads = no_of_threads;
            printf("\nTotal Threads Used are: %d\n", no_of_threads);
        }
    }
}
/*
Output
------
Enter the number of nodes: 4
Enter the cost adjacency matrix:
0 1 2 999
3 0 1 999
2 5 0 4
999 6 2 0
Total Threads Used are: 8
All-Pairs Shortest Paths is as follows:
0 1 2 6
3 0 1 5
2 3 0 4
4 5 2 0
The time taken to perform Floyd's Algorithm is: 0.217000
*/
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#define MXNET_USE_MKL_DROPOUT defined(USE_MKL) && defined(_OPENMP) && !defined(__CUDACC__)
#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // MXNET_USE_MKL_DROPOUT
#define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
const int MAX_DIM = 5;
// Dropout operator parameters: drop probability, training/always mode,
// optional variational-dropout axes, and a cuDNN opt-out flag.
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;                         // fraction of inputs dropped during training
  int mode;                        // dropout::kTraining or dropout::kAlways
  mxnet::TShape axes;              // axes for variational dropout (empty = standard)
  dmlc::optional<bool> cudnn_off;  // true => do not use the cuDNN implementation
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape())
    .describe("Axes for variational dropout kernel.");
    DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(true))
    .describe("Whether to turn off cudnn in dropout operator. "
              "This option is ignored if axes is specified.");
  }
};  // struct DropoutParam
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
// Fill r[0..n) with Bernoulli(p) draws (as ints) using MKL VSL.
// The work is split across OpenMP threads; every thread creates its own VSL
// stream with the same derived seed and skips ahead to its slice offset, so
// the combined output is deterministic for a given generator state.
static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                              int n, double p, int* r) {
  typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
  const int seed = 17 + abs(genImpl.rand() % 4096);  // small positive seed from the generator
  CHECK_GE(seed, 0);
  const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
  {
    const int ithr = omp_get_thread_num();
    const int avg_amount = (n + nthr - 1) / nthr;  // ceil(n / nthr)
    const int my_offset = ithr * avg_amount;
    const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
    if (my_amount > 0) {
      VSLStreamStatePtr stream;
      vslNewStream(&stream, VSL_BRNG_MCG31, seed);
      vslSkipAheadStream(stream, my_offset);  // jump to this thread's slice
      viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
      vslDeleteStream(&stream);
    }
  }
}
static inline bool MKLAvailable() {
  // BernoulliGenerate expects an array of int, so for types smaller than int
  // the mask buffer would be too small; we can't use MKL in those cases.
  return sizeof(DType) >= sizeof(int);
}
// MKL forward pass: generate an int Bernoulli mask into the (reinterpreted)
// mask blob, then compute out = data * mask / pkeep elementwise in parallel.
inline void MKLForward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<TBlob> &out_data) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
  CHECK_NOTNULL(pgen);
  Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
  DType *outptr = out.dptr_;
  DType *dataptr = data.dptr_;
  // safe because MKLAvailable() guarantees sizeof(DType) >= sizeof(int)
  auto maskptr = reinterpret_cast<int *>(mask.dptr_);
  int count = mask.shape_[0] * mask.shape_[1];
  BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
  const float pk_1 = 1.0f / this->pkeep_;  // inverted-dropout scale
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int i = 0; i < count; ++i) {
    outptr[i] = dataptr[i] * maskptr[i] * pk_1;
  }
}
// MKL backward pass: in-grad = out-grad * mask / pkeep, reusing the int
// mask that MKLForward wrote into the mask blob.
inline void MKLBackward(const OpContext &ctx,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &out_data,
                        const std::vector<TBlob> &out_grad) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
  Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
  DType *ingradptr = gdata.dptr_;
  const DType *outgradptr = grad.dptr_;
  auto maskptr = reinterpret_cast<int *>(mask.dptr_);
  int count = mask.shape_[0] * mask.shape_[1];
  const float pk_1 = 1.0f / this->pkeep_;  // inverted-dropout scale
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (int i = 0; i < count; ++i) {
    ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
  }
}
#endif // #if MXNET_USE_MKL_DROPOUT
public:
/*!
 * \brief Dropout kernel, compute dropout tensor
 */
struct DropoutKernel {
  /*!
   * \brief Dropout kernel function
   * \param id Thread number (0-based representing count)
   * \param gen Random number generator
   * \param N Total number of items in the output
   * \param step Step between items, related to parallelism
   * \param dropout_out Output dropout values
   * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
   * \param input_data Input data to perform the dropout on
   * \param pkeep Dropout rate (keep when the generated random number is less than this value)
   */
  MSHADOW_XINLINE static void Map(int id,
                                  RandGenerator<xpu, DType> gen,
                                  const int N,
                                  const int step,
                                  DType *dropout_out,
                                  DType *mask_out,
                                  const DType *input_data,
                                  const real_t pkeep) {
    RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
      const real_t rand_num = static_cast<real_t>(genImpl.uniform());
      // mask is 0 (dropped) or 1/pkeep (kept): inverted dropout, so no
      // rescaling is needed at inference time
      mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      dropout_out[i] = input_data[i] * mask_out[i];
    });
  }
};
struct BernoulliKernel {
  /*! \brief Bernoulli kernel for generating mask */
  MSHADOW_XINLINE static void Map(int id,
                                  RandGenerator<xpu, DType> gen,
                                  const int N,
                                  const int step,
                                  DType *mask_out,
                                  const real_t pkeep) {
    RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
      const real_t rand_num = static_cast<real_t>(genImpl.uniform());
      // produces only the scaled Bernoulli mask (0 or 1/pkeep);
      // the multiply with the input happens elsewhere
      mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
    });
  }
};
explicit DropoutOp(const DropoutParam ¶m, Context ctx) {
this->pkeep_ = 1.0f - param.p;
this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
this->axes_ = param.axes;
this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
this->ctx_ = ctx;
if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
dtype_ = mshadow::DataType<DType>::kCudnnFlag;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
}
#endif // MXNET_USE_CUDNN_DROPOUT
}
// Destroy the cuDNN descriptors iff the constructor created them
// (same GPU / pkeep > 0 / cudnn-enabled condition).
~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
  if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
    CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
    CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT
}
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// The cuDNN path is usable when dropout is active (pkeep > 0) and not
// explicitly disabled via cudnn_off.
inline bool CuDNNAvailable() {
  return this->pkeep_ > 0 && !this->cudnn_off_;
}
// cuDNN forward: run dropout on `in`, writing `out`; the mask blob is
// reused as cuDNN's reserve space (a bitmask of dropped positions).
inline void CuDNNForward(const OpContext &ctx,
                         const TBlob &in,
                         const TBlob &mask,
                         const TBlob &out) {
  Stream<xpu> *s = ctx.get_stream<xpu>();
  // set dropout state.
  ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);
  // describe input/output tensor: the whole blob is flattened into a
  // 1 x 1 x 1 x Size() tensor since dropout is elementwise
  int dim[4], stride[4];
  dim[0] = 1;
  dim[1] = 1;
  dim[2] = 1;
  dim[3] = out.Size();
  stride[0] = out.Size();
  stride[1] = out.Size();
  stride[2] = out.Size();
  stride[3] = 1;
  CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
                                        dtype_,
                                        4,
                                        dim,
                                        stride));
  CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
                                        dtype_,
                                        4,
                                        dim,
                                        stride));
  // perform dropout with cudnn
  CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
  // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
  // 1/8 of input size.
  CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
    "The size of the mask space is smaller than the required cudnn reserved space.";
  CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                 dropout_desc_,
                                 x_desc_,
                                 in.dptr<DType>(),
                                 y_desc_,
                                 out.dptr<DType>(),
                                 mask.dptr<DType>(),
                                 dropout_reserve_byte_));
}
// Runs the backward dropout pass through cuDNN, scaling `out_grad` by the
// mask recorded during CuDNNForward (passed via the same reserve space).
inline void CuDNNBackward(const OpContext &ctx,
const TBlob &out_grad,
const TBlob &mask,
const TBlob &in_grad) {
Stream<xpu> *s = ctx.get_stream<xpu>();
// describe input/output tensor
// Same flat 1x1x1xN view as the forward pass; gradients keep the shape of
// the data they correspond to.
int dim[4], stride[4];
dim[0] = 1;
dim[1] = 1;
dim[2] = 1;
dim[3] = in_grad.Size();
stride[0] = in_grad.Size();
stride[1] = in_grad.Size();
stride[2] = in_grad.Size();
stride[3] = 1;
CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
dtype_,
4,
dim,
stride));
CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
dtype_,
4,
dim,
stride));
// perform dropout with cudnn
// NOTE(review): dropout_reserve_byte_ is the value computed in the forward
// pass; cuDNN requires forward/backward to agree on it.
CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
dropout_desc_,
dy_desc_,
out_grad.dptr<DType>(),
dx_desc_,
in_grad.dptr<DType>(),
mask.dptr<DType>(),
dropout_reserve_byte_));
}
#endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// Forward pass. Dispatch order when dropout is active and no axes are set:
// MKL path, then cuDNN path, then the generic fused RNG kernel. With axes
// set, a (possibly smaller) mask is generated and broadcast-multiplied onto
// the input. When dropout is inactive (inference, or pkeep == 1) the input
// is copied through unchanged and dropout_passthrough_ stays true so
// Backward can also pass gradients through.
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
this->dropout_passthrough_ = true;
if (req[dropout::kOut] != kNullOp) {
CHECK_EQ(in_data.size(), 1U);
if (ctx.is_train) {
// Training mode also produces the mask output.
CHECK_EQ(out_data.size(), 2U);
}
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob &in = in_data[dropout::kData];
const TBlob &out = out_data[dropout::kOut];
const TBlob &mask = out_data[dropout::kMask];
if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
this->dropout_passthrough_ = false;
if (this->axes_.ndim() == 0) {
// Element-wise dropout (no shared axes): try the specialized backends
// first, each returns early on success.
#if MXNET_USE_MKL_DROPOUT
if (MKLAvailable()) {
MKLForward(ctx, in_data, out_data);
return;
}
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
if (CuDNNAvailable()) {
CuDNNForward(ctx, in, mask, out);
return;
}
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
// Generic path: one fused kernel draws the mask and multiplies it in.
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
// kAddTo is unsupported: the fused kernel overwrites the output.
CHECK(req[dropout::kOut] != kAddTo);
LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
out.dptr<DType>(),
mask.dptr<DType>(),
in.dptr<DType>(),
this->pkeep_);
return;
} else {
// Axis-shared dropout: generate a mask of the (reduced) mask shape,
// then broadcast-multiply it onto the input.
RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
CHECK_NOTNULL(pgen);
// initialize the mask
LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
mask.dptr<DType>(),
this->pkeep_);
// broadcast mul
mxnet::TShape new_lshape, new_rshape, new_oshape;
int ndim = BinaryBroadcastShapeCompact(in.shape_,
mask.shape_, out.shape_,
&new_lshape, &new_rshape, &new_oshape);
if (!ndim) {
// Shapes collapsed to identical: plain element-wise multiply.
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in.dptr<DType>(),
mask.dptr<DType>());
});
} else {
// True broadcast: strided kernel over the compacted shapes.
BROADCAST_NDIM_SWITCH(ndim, NDim, {
mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
mshadow_op::mul>, xpu>::
template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
lstride, rstride, oshape,
in.dptr<DType>(),
mask.dptr<DType>(), out.dptr<DType>());
});
}
}
} else {
// Dropout inactive: identity copy (honoring the write request type).
MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
});
}
}
}
// Backward pass. When Forward applied dropout (dropout_passthrough_ ==
// false) the incoming gradient is multiplied by the saved mask, using the
// MKL/cuDNN backends where available and falling back to element-wise or
// broadcast multiplication. Otherwise gradients pass through unchanged.
// The passthrough flag is reset to true so the next Forward starts fresh.
void Backward(const OpContext &ctx,
              const std::vector<TBlob> &out_grad,
              const std::vector<TBlob> &out_data,
              const std::vector<OpReqType> &req,
              const std::vector<TBlob> &in_grad) {
  using namespace mshadow;
  using namespace mshadow::expr;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  if (!this->dropout_passthrough_) {
    this->dropout_passthrough_ = true;
    const TBlob &gdata = in_grad[dropout::kData];
    const TBlob &grad = out_grad[dropout::kOut];
    const TBlob &mask = out_data[dropout::kMask];
    if (this->axes_.ndim() == 0) {
      // Element-wise case: try the specialized backends first.
#if MXNET_USE_MKL_DROPOUT
      if (MKLAvailable()) {
        MKLBackward(ctx, in_grad, out_data, out_grad);
        return;
      }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
      if (CuDNNAvailable()) {
        CuDNNBackward(ctx, grad, mask, gdata);
        return;
      }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
      // standard case for dropout
      CHECK_EQ(grad.Size(), mask.Size());
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
          s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
      });
      return;
    } else {
      // broadcast mul: the mask has a reduced shape along the shared axes.
      mxnet::TShape new_lshape, new_rshape, new_oshape;
      int ndim = BinaryBroadcastShapeCompact(grad.shape_,
                                             mask.shape_, gdata.shape_,
                                             &new_lshape, &new_rshape, &new_oshape);
      if (!ndim) {
        // Shapes collapsed to identical: plain element-wise multiply.
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
      } else {
        BROADCAST_NDIM_SWITCH(ndim, NDim, {
          mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
          mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
          mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
          // Use req[dropout::kData] (== req[0]) for consistency with the
          // other write-request lookups in this method.
          mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
                                                             mshadow_op::mul>, xpu>::
            template LaunchEx(s, new_oshape.Size(), req[dropout::kData],
                              lstride, rstride, oshape,
                              grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
        });
      }
    }
  } else {
    // Forward was a passthrough, so the gradient is copied through as-is.
    const TBlob& gdata = in_grad[dropout::kData];
    const TBlob& grad = out_grad[dropout::kOut];
    MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
      mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
        s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
    });
  }
}
private:
/*! \brief Dropout rate (keep when the generated random number is less than this value) */
real_t pkeep_;
/*! \brief Dropout mode */
dropout::DropoutOpMode mode_;
/*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
mxnet::TShape axes_;
/*! \brief Flag to record whether forward is executed in pass-through mode */
bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
bool cudnn_off_;
Context ctx_;
cudnnDataType_t dtype_;
cudnnDropoutDescriptor_t dropout_desc_;
uint64_t seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
size_t dropout_reserve_byte_;
cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif // MXNET_USE_CUDNN_DROPOUT
}; // class DropoutOp
// Creates the per-operator state holding a DropoutOp instance, specialized
// on the device type and the input's real dtype.
static OpStatePtr CreateDropoutState(const nnvm::NodeAttrs &attrs,
const Context ctx,
const mxnet::ShapeVector &in_shapes,
const std::vector<int> &in_types) {
const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
OpStatePtr state;
// The switch macro expands to a case per real dtype; the return statement
// inside it exits the function for every matched type.
MSHADOW_REAL_TYPE_SWITCH(in_types[dropout::kData], DType, {
if (ctx.dev_type == kGPU) {
state = OpStatePtr::Create<DropoutOp<gpu, DType>>(param, ctx);
} else {
state = OpStatePtr::Create<DropoutOp<cpu, DType>>(param, ctx);
}
return state;
});
// Reached only when the dtype matched no case in the switch above.
LOG(FATAL) << "should never reach here";
return OpStatePtr();  // should never reach here
}
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
op.Forward(ctx, inputs, req, outputs);
});
}
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1);
CHECK_EQ(req.size(), 1);
std::vector<TBlob> out_grads(2);
std::vector<TBlob> out_data(2);
out_grads[dropout::kOut] = inputs[0];
out_data[dropout::kMask] = inputs[1];
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
op.Backward(ctx, out_grads, out_data, req, outputs);
});
}
} // namespace op
} // namespace mxnet
#undef MXNET_USE_MKL_DROPOUT
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
explicit_task.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
// OMPT lit test: verifies task-create/schedule events and task-frame
// (exit/reenter) pointers for an explicit task created inside a master
// region. The CHECK lines below are FileCheck directives and must match the
// tool's output exactly.
int main()
{
// Hand-shake flag between the explicit task and the waiting master thread.
int condition=0;
omp_set_nested(0);
print_frame(0);
#pragma omp parallel num_threads(2)
{
print_frame_from_outlined_fn(1);
print_ids(0);
print_ids(1);
print_frame(0);
#pragma omp master
{
print_ids(0);
#pragma omp task shared(condition)
{
// Signal the master that the task has started, then dump its frames.
OMPT_SIGNAL(condition);
print_frame(1);
print_ids(0);
print_ids(1);
print_ids(2);
}
print_fuzzy_address(1);
// Block until the task has run, so its events are ordered before ours.
OMPT_WAIT(condition,1);
print_ids(0);
}
#pragma omp barrier
print_ids(0);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[MAIN_REENTER]], parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// nested parallel masters
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// <- ompt_event_task_create would be expected here
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=[[REENTER]], new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// explicit barrier after master
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// implicit barrier parallel
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]]
// this is expected to come earlier and at MASTER:
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[REENTER]]
// CHECK: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
for_loop.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
#include <omp.h>
// OMPT lit test: verifies barrier begin/end and wait-barrier events (with
// codeptr_ra values) around a worksharing loop and at parallel region end.
// The CHECK lines are FileCheck directives and must match exactly.
int main()
{
int y[] = {0,1,2,3};
#pragma omp parallel num_threads(2)
{
//implicit barrier at end of for loop
int i;
#pragma omp for
for (i = 0; i < 4; i++)
{
y[i]++;
}
// Records the address right after the loop's implicit barrier.
print_current_address();
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// master thread implicit barrier at loop end
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// master thread implicit barrier at parallel end
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// worker thread explicit barrier
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// worker thread implicit barrier after parallel
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
return 0;
}
|
displacement_lagrangemultiplier_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
          class TDenseSpace >
class DisplacementLagrangeMultiplierContactCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
  ///@name Type Definitions
  ///@{
  /// Pointer definition of DisplacementLagrangeMultiplierContactCriteria
  KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierContactCriteria );
  /// The base class definition (and it subclasses)
  typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
  typedef typename BaseType::TDataType                     TDataType;
  typedef typename BaseType::DofsArrayType             DofsArrayType;
  typedef typename BaseType::TSystemMatrixType     TSystemMatrixType;
  typedef typename BaseType::TSystemVectorType     TSystemVectorType;
  /// The sparse space used
  typedef TSparseSpace                               SparseSpaceType;
  /// The table stream definition TODO: Replace by logger
  typedef TableStreamUtility::Pointer        TablePrinterPointerType;
  /// The index type definition
  typedef std::size_t                                      IndexType;
  /// The key type definition
  typedef std::size_t                                        KeyType;
  ///@}
  ///@name Life Cycle
  ///@{
  /// Constructor.
  /**
   * @param DispRatioTolerance Relative tolerance for displacement error
   * @param DispAbsTolerance Absolute tolerance for displacement error
   * @param LMRatioTolerance Relative tolerance for lagrange multiplier error
   * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error
   * @param EnsureContact To check if the contact is lost
   * @param PrintingOutput If the output is going to be printed in a txt file
   */
  explicit DisplacementLagrangeMultiplierContactCriteria(
      const TDataType DispRatioTolerance,
      const TDataType DispAbsTolerance,
      const TDataType LMRatioTolerance,
      const TDataType LMAbsTolerance,
      const bool EnsureContact = false,
      const bool PrintingOutput = false
      )
      : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
        mEnsureContact(EnsureContact),
        mPrintingOutput(PrintingOutput),
        mTableIsInitialized(false)
  {
    // The displacement solution
    mDispRatioTolerance = DispRatioTolerance;
    mDispAbsTolerance = DispAbsTolerance;
    // The contact solution
    mLMRatioTolerance = LMRatioTolerance;
    mLMAbsTolerance = LMAbsTolerance;
  }

  /**
   * @brief Default constructor (parameters)
   * @param ThisParameters The configuration parameters
   */
  explicit DisplacementLagrangeMultiplierContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
      : ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
        mTableIsInitialized(false)
  {
    // The default parameters
    Parameters default_parameters = Parameters(R"(
    {
        "ensure_contact"                           : false,
        "print_convergence_criterion"              : false,
        "displacement_relative_tolerance"          : 1.0e-4,
        "displacement_absolute_tolerance"          : 1.0e-9,
        "contact_displacement_relative_tolerance"  : 1.0e-4,
        "contact_displacement_absolute_tolerance"  : 1.0e-9
    })" );
    ThisParameters.ValidateAndAssignDefaults(default_parameters);
    // The displacement solution
    mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
    mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();
    // The contact solution
    mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
    mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
    // Additional flags -> NOTE: Replace for a real flag?
    mEnsureContact = ThisParameters["ensure_contact"].GetBool();
    mPrintingOutput = ThisParameters["print_convergence_criterion"].GetBool();
  }

  // Copy constructor.
  // NOTE(review): initializer list reordered to match the member declaration
  // order (silences -Wreorder; actual initialization always follows
  // declaration order anyway).
  DisplacementLagrangeMultiplierContactCriteria( DisplacementLagrangeMultiplierContactCriteria const& rOther )
      : BaseType(rOther)
      , mEnsureContact(rOther.mEnsureContact)
      , mPrintingOutput(rOther.mPrintingOutput)
      , mTableIsInitialized(rOther.mTableIsInitialized)
      , mDispRatioTolerance(rOther.mDispRatioTolerance)
      , mDispAbsTolerance(rOther.mDispAbsTolerance)
      , mLMRatioTolerance(rOther.mLMRatioTolerance)
      , mLMAbsTolerance(rOther.mLMAbsTolerance)
  {
  }

  /// Destructor.
  ~DisplacementLagrangeMultiplierContactCriteria() override = default;

  ///@}
  ///@name Operators
  ///@{

  /**
   * @brief Compute relative and absolute error.
   * @details Accumulates the squared solution values and squared increments
   * separately for displacement DoFs and Lagrange-multiplier DoFs, then
   * checks each set against its relative (ratio) and absolute tolerances.
   * @param rModelPart Reference to the ModelPart containing the contact problem.
   * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
   * @param rA System matrix (unused)
   * @param rDx Vector of results (variations on nodal variables)
   * @param rb RHS vector (residual)
   * @return true if convergence is achieved, false otherwise
   */
  bool PostCriteria(
      ModelPart& rModelPart,
      DofsArrayType& rDofSet,
      const TSystemMatrixType& rA,
      const TSystemVectorType& rDx,
      const TSystemVectorType& rb
      ) override
  {
    if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something
      // Initialize
      TDataType disp_solution_norm = 0.0, lm_solution_norm = 0.0, disp_increase_norm = 0.0, lm_increase_norm = 0.0;
      IndexType disp_dof_num(0),lm_dof_num(0);

      // Loop over Dofs
      #pragma omp parallel for reduction(+:disp_solution_norm,lm_solution_norm,disp_increase_norm,lm_increase_norm,disp_dof_num,lm_dof_num)
      for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
        auto it_dof = rDofSet.begin() + i;

        std::size_t dof_id;
        TDataType dof_value, dof_incr;

        if (it_dof->IsFree()) {
          dof_id = it_dof->EquationId();
          dof_value = it_dof->GetSolutionStepValue(0);
          dof_incr = rDx[dof_id];

          // Bind by reference: avoids copying the variable object per DoF.
          const auto& curr_var = it_dof->GetVariable();
          if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
            lm_solution_norm += dof_value * dof_value;
            lm_increase_norm += dof_incr * dof_incr;
            lm_dof_num++;
          } else {
            disp_solution_norm += dof_value * dof_value;
            disp_increase_norm += dof_incr * dof_incr;
            disp_dof_num++;
          }
        }
      }

      // Avoid 0/0 in the ratios below.
      if(disp_increase_norm == 0.0) disp_increase_norm = 1.0;
      if(lm_increase_norm == 0.0) lm_increase_norm = 1.0;
      if(disp_solution_norm == 0.0) disp_solution_norm = 1.0;

      KRATOS_ERROR_IF(mEnsureContact && lm_solution_norm == 0.0) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

      const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
      const TDataType lm_ratio = std::sqrt(lm_increase_norm/lm_solution_norm);

      // Guard against division by zero when no free DoF of one type exists.
      const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num == 0 ? 1 : disp_dof_num);
      const TDataType lm_abs = std::sqrt(lm_increase_norm)/ static_cast<TDataType>(lm_dof_num == 0 ? 1 : lm_dof_num);

      // The process info of the model part
      ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

      // We print the results // TODO: Replace for the new log
      if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
        if (r_process_info.Has(TABLE_UTILITY)) {
          std::cout.precision(4);
          TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
          auto& Table = p_table->GetTable();
          Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
        } else {
          std::cout.precision(4);
          if (mPrintingOutput == false) {
            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("DoF CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT(" LAGRANGE MUL:\tRATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
          } else {
            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "DoF CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
            KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << " LAGRANGE MUL:\tRATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
          }
        }
      }

      // We check if converged
      const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
      // With no LM contribution and contact not enforced, the LM check is
      // vacuously satisfied.
      const bool lm_converged = (!mEnsureContact && lm_solution_norm == 0.0) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);

      if (disp_converged && lm_converged) {
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
          if (r_process_info.Has(TABLE_UTILITY)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& table = p_table->GetTable();
            if (mPrintingOutput == false)
              table << BOLDFONT(FGRN("       Achieved"));
            else
              table << "Achieved";
          } else {
            if (mPrintingOutput == false)
              KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
            else
              KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is achieved" << std::endl;
          }
        }
        return true;
      } else {
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
          if (r_process_info.Has(TABLE_UTILITY)) {
            TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
            auto& table = p_table->GetTable();
            if (mPrintingOutput == false)
              table << BOLDFONT(FRED("   Not achieved"));
            else
              table << "Not achieved";
          } else {
            if (mPrintingOutput == false)
              KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
            else
              KRATOS_INFO("DisplacementLagrangeMultiplierContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
          }
        }
        return false;
      }
    }
    else // In this case all the displacements are imposed!
      return true;
  }

  /**
   * @brief This function initialize the convergence criteria
   * @details Registers the table columns (once) when a TableStreamUtility is
   * present in the ProcessInfo.
   * @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
   */
  void Initialize( ModelPart& rModelPart ) override
  {
    BaseType::mConvergenceCriteriaIsInitialized = true;

    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mTableIsInitialized == false) {
      TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
      auto& table = p_table->GetTable();
      table.AddColumn("DP RATIO", 10);
      table.AddColumn("EXP. RAT", 10);
      table.AddColumn("ABS", 10);
      table.AddColumn("EXP. ABS", 10);
      table.AddColumn("LM RATIO", 10);
      table.AddColumn("EXP. RAT", 10);
      table.AddColumn("ABS", 10);
      table.AddColumn("EXP. ABS", 10);
      table.AddColumn("CONVERGENCE", 15);
      mTableIsInitialized = true;
    }
  }

  ///@}
  ///@name Operations
  ///@{
  ///@}
  ///@name Access
  ///@{
  ///@}
  ///@name Inquiry
  ///@{
  ///@}
  ///@name Friends
  ///@{

protected:
  ///@name Protected static Member Variables
  ///@{
  ///@}
  ///@name Protected member Variables
  ///@{
  ///@}
  ///@name Protected Operators
  ///@{
  ///@}
  ///@name Protected Operations
  ///@{
  ///@}
  ///@name Protected Access
  ///@{
  ///@}
  ///@name Protected Inquiry
  ///@{
  ///@}
  ///@name Protected LifeCycle
  ///@{
  ///@}

private:
  ///@name Static Member Variables
  ///@{
  ///@}
  ///@name Member Variables
  ///@{

  bool mEnsureContact;           /// This "flag" is used to check that the norm of the LM is always greater than 0 (no contact)
  bool mPrintingOutput;          /// If the colors and bold are printed
  bool mTableIsInitialized;      /// If the table is already initialized

  TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
  TDataType mDispAbsTolerance;   /// The absolute value threshold for the norm of the displacement

  TDataType mLMRatioTolerance;   /// The ratio threshold for the norm of the LM
  TDataType mLMAbsTolerance;     /// The absolute value threshold for the norm of the LM

  ///@}
  ///@name Private Operators
  ///@{
  ///@}
  ///@name Private Operations
  ///@{
  ///@}
  ///@name Private Access
  ///@{
  ///@}
  ///@}
  ///@name Serialization
  ///@{
  ///@name Private Inquiry
  ///@{
  ///@}
  ///@name Unaccessible methods
  ///@{
  ///@}
};
///@} // Kratos classes
///@} // Application group
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_CONTACT_CRITERIA_H */
|
isotope.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdlib.h>
#include <phonoc_const.h>
#include <phonoc_utils.h>
#include <other_h/isotope.h>
#include <lapack_wrapper.h>
/* Compute the isotope (mass-variance) scattering strength gamma[i] for each
   requested band at one q-point, using Gaussian smearing of the
   energy-conserving delta function.

   gamma            [out] length num_band0; strength per requested band.
   grid_point       q-point index of the bands listed in band_indices.
   mass_variances   per-atom mass variance (num_band / 3 atoms).
   frequencies      row-major (num_grid_points, num_band) phonon frequencies.
   eigenvectors     row-major (num_grid_points, num_band, num_band) complex
                    eigenvectors; band index is the fastest dimension.
   num_grid_points  number of q-points summed over.
   band_indices     length num_band0; band indices of interest at grid_point.
   num_band         3 * number of atoms.
   num_band0        number of requested bands.
   sigma            smearing width passed to gaussian().
   cutoff_frequency bands with frequency below this are skipped.

   On allocation failure, gamma is zero-filled and the function returns. */
void
iso_get_isotope_scattering_strength(double *gamma,
                                    const size_t grid_point,
                                    const double *mass_variances,
                                    const double *frequencies,
                                    const lapack_complex_double *eigenvectors,
                                    const size_t num_grid_points,
                                    const int *band_indices,
                                    const size_t num_band,
                                    const size_t num_band0,
                                    const double sigma,
                                    const double cutoff_frequency)
{
  size_t i, j, k, l, m;
  double *e0_r, *e0_i, e1_r, e1_i, a, b, f, *f0, dist, sum_g, sum_g_k;

  e0_r = (double*)malloc(sizeof(double) * num_band * num_band0);
  e0_i = (double*)malloc(sizeof(double) * num_band * num_band0);
  f0 = (double*)malloc(sizeof(double) * num_band0);
  if (e0_r == NULL || e0_i == NULL || f0 == NULL) {
    /* Out of memory: free whatever succeeded (free(NULL) is a no-op) and
       report zeros rather than dereferencing NULL below. */
    free(e0_r);
    free(e0_i);
    free(f0);
    for (i = 0; i < num_band0; i++) {
      gamma[i] = 0;
    }
    return;
  }

  /* Cache frequencies and eigenvector components of the requested bands. */
  for (i = 0; i < num_band0; i++) {
    f0[i] = frequencies[grid_point * num_band + band_indices[i]];
    for (j = 0; j < num_band; j++) {
      e0_r[i * num_band + j] = lapack_complex_double_real
        (eigenvectors[grid_point * num_band * num_band +
                      j * num_band + band_indices[i]]);
      e0_i[i * num_band + j] = lapack_complex_double_imag
        (eigenvectors[grid_point * num_band * num_band +
                      j * num_band + band_indices[i]]);
    }
  }

  for (i = 0; i < num_band0; i++) {
    gamma[i] = 0;
  }

  for (i = 0; i < num_band0; i++) { /* band index0 */
    if (f0[i] < cutoff_frequency) {
      continue;
    }
    sum_g = 0;
#pragma omp parallel for private(k, l, m, f, e1_r, e1_i, a, b, dist, sum_g_k) reduction(+:sum_g)
    for (j = 0; j < num_grid_points; j++) {
      sum_g_k = 0;
      for (k = 0; k < num_band; k++) { /* band index */
        f = frequencies[j * num_band + k];
        if (f < cutoff_frequency) {
          continue;
        }
        /* Gaussian weight for energy conservation between bands i and k. */
        dist = gaussian(f - f0[i], sigma);
        for (l = 0; l < num_band / 3; l++) { /* atom index */
          /* |<e0|e1>|^2 restricted to atom l's three Cartesian components:
             a + ib accumulates the complex inner product. */
          a = 0;
          b = 0;
          for (m = 0; m < 3; m++) {
            e1_r = lapack_complex_double_real
              (eigenvectors[j * num_band * num_band +
                            (l * 3 + m) * num_band + k]);
            e1_i = lapack_complex_double_imag
              (eigenvectors[j * num_band * num_band +
                            (l * 3 + m) * num_band + k]);
            a += (e0_r[i * num_band + l * 3 + m] * e1_r +
                  e0_i[i * num_band + l * 3 + m] * e1_i);
            b += (e0_i[i * num_band + l * 3 + m] * e1_r -
                  e0_r[i * num_band + l * 3 + m] * e1_i);
          }
          sum_g_k += (a * a + b * b) * mass_variances[l] * dist;
        }
      }
      sum_g += sum_g_k;
    }
    gamma[i] = sum_g;
  }

  for (i = 0; i < num_band0; i++) {
    /* Frequency unit to ang-freq: *(2pi)**2/(2pi) */
    /* Ang-freq to freq unit (for lifetime): /2pi */
    /* gamma = 1/2t */
    gamma[i] *= M_2PI / 4 * f0[i] * f0[i] / 2;
  }

  free(f0);
  f0 = NULL;
  free(e0_r);
  e0_r = NULL;
  free(e0_i);
  e0_i = NULL;
}
/* Tetrahedron-method variant of iso_get_isotope_scattering_strength: the
   Gaussian smearing is replaced by precomputed integration_weights, and the
   q-point sum runs over irreducible grid points with multiplicities.

   gamma                [out] length num_band0; strength per requested band.
   grid_point           q-point index of the bands listed in band_indices.
   ir_grid_points       length num_grid_points; irreducible grid-point ids.
   weights              per-grid-point multiplicity, indexed by grid-point id.
   mass_variances       per-atom mass variance (num_band / 3 atoms).
   frequencies          row-major (n_total_gp, num_band) phonon frequencies.
   eigenvectors         row-major (n_total_gp, num_band, num_band) complex
                        eigenvectors; band index fastest.
   num_grid_points      number of irreducible grid points.
   band_indices         length num_band0; bands of interest at grid_point.
   num_band             3 * number of atoms.
   num_band0            number of requested bands.
   integration_weights  (gp, band0, band) tetrahedron weights.
   cutoff_frequency     bands below this frequency are skipped.

   NOTE(review): gamma_ij is allocated for num_grid_points rows but indexed
   by gp = ir_grid_points[i]; this assumes every ir grid-point id is below
   num_grid_points -- confirm against the caller.

   On allocation failure, gamma is zero-filled and the function returns. */
void iso_get_thm_isotope_scattering_strength
(double *gamma,
 const size_t grid_point,
 const size_t *ir_grid_points,
 const int *weights,
 const double *mass_variances,
 const double *frequencies,
 const lapack_complex_double *eigenvectors,
 const size_t num_grid_points,
 const int *band_indices,
 const size_t num_band,
 const size_t num_band0,
 const double *integration_weights,
 const double cutoff_frequency)
{
  size_t i, j, k, l, m, gp;
  double *e0_r, *e0_i, *f0, *gamma_ij;
  double e1_r, e1_i, a, b, f, dist, sum_g_k;

  e0_r = (double*)malloc(sizeof(double) * num_band * num_band0);
  e0_i = (double*)malloc(sizeof(double) * num_band * num_band0);
  f0 = (double*)malloc(sizeof(double) * num_band0);
  if (e0_r == NULL || e0_i == NULL || f0 == NULL) {
    /* Out of memory: free what succeeded and report zeros. */
    free(e0_r);
    free(e0_i);
    free(f0);
    for (i = 0; i < num_band0; i++) {
      gamma[i] = 0;
    }
    return;
  }

  /* Cache frequencies and eigenvector components of the requested bands. */
  for (i = 0; i < num_band0; i++) {
    f0[i] = frequencies[grid_point * num_band + band_indices[i]];
    for (j = 0; j < num_band; j++) {
      e0_r[i * num_band + j] = lapack_complex_double_real
        (eigenvectors[grid_point * num_band * num_band +
                      j * num_band + band_indices[i]]);
      e0_i[i * num_band + j] = lapack_complex_double_imag
        (eigenvectors[grid_point * num_band * num_band +
                      j * num_band + band_indices[i]]);
    }
  }

  gamma_ij = (double*)malloc(sizeof(double) * num_grid_points * num_band0);
  if (gamma_ij == NULL) {
    free(f0);
    free(e0_r);
    free(e0_i);
    for (i = 0; i < num_band0; i++) {
      gamma[i] = 0;
    }
    return;
  }

#pragma omp parallel for
  for (i = 0; i < num_grid_points * num_band0; i++) {
    gamma_ij[i] = 0;
  }

#pragma omp parallel for private(j, k, l, m, f, gp, e1_r, e1_i, a, b, dist, sum_g_k)
  for (i = 0; i < num_grid_points; i++) {
    gp = ir_grid_points[i];
    for (j = 0; j < num_band0; j++) { /* band index0 */
      if (f0[j] < cutoff_frequency) {
        continue;
      }
      sum_g_k = 0;
      for (k = 0; k < num_band; k++) { /* band index */
        f = frequencies[gp * num_band + k];
        if (f < cutoff_frequency) {
          continue;
        }
        /* Precomputed tetrahedron weight replaces the Gaussian. */
        dist = integration_weights[gp * num_band0 * num_band +
                                   j * num_band + k];
        for (l = 0; l < num_band / 3; l++) { /* atom index */
          /* |<e0|e1>|^2 restricted to atom l's Cartesian components. */
          a = 0;
          b = 0;
          for (m = 0; m < 3; m++) {
            e1_r = lapack_complex_double_real
              (eigenvectors
               [gp * num_band * num_band + (l * 3 + m) * num_band + k]);
            e1_i = lapack_complex_double_imag
              (eigenvectors
               [gp * num_band * num_band + (l * 3 + m) * num_band + k]);
            a += (e0_r[j * num_band + l * 3 + m] * e1_r +
                  e0_i[j * num_band + l * 3 + m] * e1_i);
            b += (e0_i[j * num_band + l * 3 + m] * e1_r -
                  e0_r[j * num_band + l * 3 + m] * e1_i);
          }
          sum_g_k += (a * a + b * b) * mass_variances[l] * dist;
        }
      }
      /* Weight by the grid-point multiplicity. */
      gamma_ij[gp * num_band0 + j] = sum_g_k * weights[gp];
    }
  }

  for (i = 0; i < num_band0; i++) {
    gamma[i] = 0;
  }

  /* Serial reduction over irreducible grid points. */
  for (i = 0; i < num_grid_points; i++) {
    gp = ir_grid_points[i];
    for (j = 0; j < num_band0; j++) {
      gamma[j] += gamma_ij[gp * num_band0 + j];
    }
  }

  for (i = 0; i < num_band0; i++) {
    /* Frequency unit to ang-freq: *(2pi)**2/(2pi) */
    /* Ang-freq to freq unit (for lifetime): /2pi */
    /* gamma = 1/2t */
    gamma[i] *= M_2PI / 4 * f0[i] * f0[i] / 2;
  }

  free(gamma_ij);
  gamma_ij = NULL;
  free(f0);
  f0 = NULL;
  free(e0_r);
  e0_r = NULL;
  free(e0_i);
  e0_i = NULL;
}
|
boz.c | /* boz.c by Mark Neyrinck */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "voz.h"
#define PRNTHR 1000
#define NPTOTOL 3000
#define NGRID 10
#define PI 3.14159265359
/* #define NSIM 512. */
/* A candidate halo: a central zone plus the other zones attached to it,
   annotated with the results of gravitational unbinding. */
typedef struct Zone {
  realT prob; /* Halo probability */
  int *p; /* Particles in the central zone */
  int np; /* Number of them in the central zone */
  int *z; /* Other zones attached to the zone */
  int nz; /* Number of them */
  int nptot; /* Total number of them */
  int npunb; /* Total number of them after unbinding */
  int core; /* particle ID of the zone's density peak */
  realT corevol; /* volume of the peak's cell (inverse of its density) */
  int mpp; /* Particle with deepest potential */
  char mppincentral; /* Whether or not mpp is in central zone */
  int mbp; /* Most bound particle */
  char mbpincentral; /* Whether or not mbp is in central zone */
  realT potmin; /* deepest (most negative) per-particle potential */
  realT pottot; /* sum of per-particle potentials over bound particles */
  realT bndmin; /* most negative boundness (ke - potconst*pot) */
  realT bndtot; /* sum of boundnesses over bound particles */
  realT v[3]; /* velocity centroid of the bound core-zone particles, km/s */
} ZONE;
void findrtop(realT *a, int na, int *iord, int nb);
/* qsort comparator: ascending order of the ints pointed at.
   Returns -1, 0, or 1. */
int compar(const void * n1, const void * n2) {
  int lhs = *(const int *)n1;
  int rhs = *(const int *)n2;

  if (lhs < rhs)
    return -1;
  if (lhs > rhs)
    return 1;
  return 0;
}
/* Square of x. */
realT sq(realT x) {
  return x * x;
}
/* Print a debug message followed by a newline and flush stdout so it
   appears immediately. */
void dbg(char *c) {
  fputs(c, stdout);
  fputc('\n', stdout);
  fflush(stdout);
  return;
}
int posread(char *posfile, realT ***p, realT fact);
int velread(char *velfile, realT ***v, realT fact);
/*
 * Remove gravitationally unbound particles from each candidate halo ("zone")
 * and write the bound memberships plus a sorted text catalogue.
 *
 * Workflow:
 *  1. Parse 10 command-line arguments (box size, nsim, Omega_m, scale
 *     factor a, five file names, unbinding factor f).
 *  2. Read particle positions/velocities and the pre-unbinding zone file.
 *  3. For every halo, iteratively unbind particles whose kinetic energy
 *     exceeds a gradually lowered multiple of their potential energy.
 *     Small haloes (nptot < NPTOTOL) use the O(N^2) brute-force potential;
 *     large ones first use an NGRID^3 monopole grid approximation and only
 *     fall back to brute force for borderline particles.
 *  4. Write bound particle lists (binary) and a catalogue sorted by bound
 *     mass (text).
 *
 * Supports checkpoint/restart: a pre-existing output file is moved to
 * boz.tmp and already-processed haloes are copied over instead of redone.
 *
 * Fixes relative to the previous revision:
 *  - gc/gm pointer arrays were allocated with element sizes sizeof(realT)
 *    and sizeof(int) instead of pointer sizes (heap overflow on LP64).
 *  - restart-read loop could write z[nhaloes].npunb out of bounds on a
 *    fully written old file.
 *  - distp used sqrt(dist) (stale/uninitialized) instead of sqrt(distp).
 *  - qsort element size was hardcoded 4 instead of sizeof(realT).
 *  - sorter[i+1] could be read one past the end at grid midpoints.
 *  - gc[d][j] was read before the j <= NGRID bound check.
 *  - a missing input zone file was reported but not fatal before fread.
 */
int main(int argc,char **argv) {
  FILE *inz, *outz, *outztmp, *outlist;
  int i, j, h, h2,h3,c, np, nhaloes, nhunb;
  int nsim;
  int *hp, niter;
  char *unb;
  realT e1;
  ZONE *z;
  realT hubble,pot;
  realT **p, **v;
  realT potconstreal, potconst;
  realT boundness, boundnesslow, boundnesshigh, maxboundness;
  realT dist, distp,v2;
  int numunbound, unbindthis;
  char *posfile, *velfile, *inzfile, *outzfile, *outlistfile;
  char systemstr[80];
  realT boxsize, boxsize2;
  int p1;
  realT *sorter, maxprob;
  realT *sortermass;
  int *tot2unb, *iord;
  realT **hr,**gc,xg,yg,zg;
  int d,skippy,maxnptot,***gn,**gm,g0,g1,g2,gcrit[3];
  realT cell,a,omegam;
  char ok, existold;
  int halready,nhaloesoutz,nread;
  realT potfact,ke;
  realT *koverp,koverpmax,multiplier;
  realT *bndarr,*potarr;

  e1 = exp(1.)-1.;
  halready = 0;

  /* --- Argument parsing --- */
  if (argc != 11) {
    printf("Wrong number of arguments.\n");
    printf("arg1: box size\n");
    printf("arg2: nsim\n");
    printf("arg3: Omega_matter\n");
    printf("arg4: scale factor a\n");
    printf("arg5: position file\n");
    printf("arg6: velocity file\n");
    printf("arg7: input zone file\n");
    printf("arg8: output bound zone file\n");
    printf("arg9: output text file\n");
    printf("arg10: unbinding f\n\n");
    exit(0);
  }
  if (sscanf(argv[1],"%"vozRealSym,&boxsize) != 1) {
    printf("%s is not a valid boxsize\n",argv[1]);
    exit(0);
  }
  boxsize2 = boxsize/2.;
  if (sscanf(argv[2],"%d",&nsim) != 1) {
    printf("%s is not a valid Nsim\n",argv[2]);
    exit(0);
  }
  if (sscanf(argv[3],"%"vozRealSym,&omegam) != 1) {
    printf("%s is not a valid Omega_matter\n",argv[3]);
    exit(0);
  }
  if (sscanf(argv[4],"%"vozRealSym,&a) != 1) {
    printf("%s is not a valid scale factor\n",argv[4]);
    exit(0);
  }
  posfile = argv[5];
  velfile = argv[6];
  inzfile = argv[7];
  outzfile = argv[8];
  outlistfile = argv[9];
  if (sscanf(argv[10],"%"vozRealSym,&potfact) != 1) {
    printf("%s is not a valid potfact\n",argv[10]);
    exit(0);
  }

  cell = boxsize/nsim;
  hubble = 100. * sqrt(omegam/(a*a*a) + 1. - omegam);
  /* Assumes Omega_matter + Omega_lambda = 1 */
  /* There's no little_h because distance measures are already in Mpc/h */
  printf("a = %g => Multiplying H0 by %e\n",a,hubble/100.);
  /* When A != 1 */
  potconstreal = omegam * 3.*cell*cell*cell*100.*100./(8.*PI);
  /* GM_particle, plus conversion factors to (km/sec)^2 */
  printf("cell, potconst = %g,%e\n",cell,potconstreal);

  /* --- Read positions & velocities --- */
  np = posread(posfile,&p,boxsize);
  if (velread(velfile,&v,1.0) != np) {
    printf("Numbers of particles don't match! Exiting.\n");
    exit(0);
  }

  /* --- Pre-unbinding halo list --- */
  inz = fopen(inzfile, "r");
  if (inz == NULL) {
    /* Was non-fatal before, which crashed on the fread below. */
    printf("Unable to open %s\n\n",inzfile);
    exit(0);
  }
  /* Header: arguments were in (ptr, nmemb, size) order for np; fixed to the
     standard (ptr, size, nmemb), and both reads are now checked. */
  if (fread(&np,sizeof(int),1,inz) != 1 ||
      fread(&nhaloes,sizeof(int),1,inz) != 1) {
    printf("Unable to read header from %s\n\n",inzfile);
    exit(0);
  }
  printf("np = %d, nhaloes = %d\n",np,nhaloes);fflush(stdout);
  z = (ZONE *)malloc(nhaloes*sizeof(ZONE));
  for (h=0; h<nhaloes; h++) {
    fread(&(z[h].np),sizeof(int),1,inz);
    z[h].p = (int *)malloc(z[h].np*sizeof(int));
    fread(z[h].p,sizeof(int),z[h].np,inz);
  }
  maxprob = 0.;
  for (h=0; h<nhaloes;h++) {
    fread(&(z[h].prob),sizeof(realT),1,inz);
    if (z[h].prob > maxprob) maxprob = z[h].prob;
    fread(&(z[h].core),sizeof(int),1,inz);
    fread(&(z[h].corevol),sizeof(realT),1,inz);
    fread(&(z[h].nz),sizeof(int),1,inz);
    z[h].z = (int *)malloc(z[h].nz*sizeof(int));
    fread(z[h].z,sizeof(int),z[h].nz,inz);
  }
  fclose(inz);

  /* Assign cores, nptots, determine maximum */
  maxnptot = 0;
  for (h=0; h<nhaloes; h++) {
    z[h].nptot = 0;
    z[h].npunb = -1;
    for (h2 = 0; h2 < z[h].nz; h2++)
      z[h].nptot += z[z[h].z[h2]].np;
    if (z[h].nptot > maxnptot) maxnptot = z[h].nptot;
  }

  /* Scratch arrays sized for the biggest halo. */
  hp = (int *)malloc(maxnptot*sizeof(int));
  koverp = (realT *)malloc(maxnptot*sizeof(realT));
  bndarr = (realT *)malloc(maxnptot*sizeof(realT));
  potarr = (realT *)malloc(maxnptot*sizeof(realT));
  unb = (char *)malloc(maxnptot*sizeof(char));
  /* Stores boundedness; 0 = bound, 1 = freshly unbound, 2 = unbound in last iteration*/
  hr = (realT **)malloc(3*sizeof(realT *));
  h = 0;

  /* --- Read stuff from ubz file if it exists (checkpoint restart) --- */
  outz = fopen(outzfile, "r");
  if (outz == NULL) {
    existold = 0;
  } else {
    fclose(outz);
    printf("Previous output bound zone file found.\n");
    printf("Moving it to boz.tmp and reading from it ...\n");
    sprintf(systemstr,"mv %s boz.tmp",outzfile);
    system(systemstr);
    existold = 1;
  }
  outz = fopen(outzfile,"w");
  fwrite(&nhaloes,sizeof(int),1,outz);
  if (existold == 1) {
    outztmp = fopen("boz.tmp", "r");
    fread(&nhaloesoutz,sizeof(int),1,outztmp);
    if (nhaloesoutz != nhaloes) {
      printf("Numbers of haloes (%d,%d) don't match!\n",nhaloes,nhaloesoutz);
      printf("Not using previous outz file\n");
    } else {
      ok = 1;
      /* Bound by nhaloes: previously a fully written old file caused one
         extra read attempt and an out-of-bounds write to z[nhaloes]. */
      while (ok && h < nhaloes) {
        nread = fread(&(z[h].npunb),sizeof(int),1,outztmp);
        if (nread != 1) {
          printf("End/corruption of outz file encountered, at h=%d.\n",h);
          ok = 0;
        } else {
          if (z[h].npunb > maxnptot) {
            printf("Fatal error reading halo %d:\n",h);
            printf("Number of bound particles (%d) exceeds maxnptot.",z[h].npunb);fflush(stdout);
            exit(0);
          }
          if (z[h].npunb > 0) {
            nread = fread(hp,sizeof(int),z[h].npunb,outztmp);
            if (nread != z[h].npunb) {
              printf("End/corruption of outz file encountered, at h=%d.\n",h); fflush(stdout);
              ok = 0;
            } else {
              fwrite(&(z[h].npunb),sizeof(int),1,outz);
              fwrite(hp,sizeof(int),z[h].npunb,outz);
              /* Calculate v for halo.  DL is presumably a macro from voz.h
                 looping d over the 3 spatial dimensions -- confirm. */
              DL z[h].v[d] = 0.;
              for (i=0; i<z[h].npunb; i++) {
                DL z[h].v[d] += v[d][hp[i]];
              }
              DL z[h].v[d] /= (realT)z[h].npunb;
            }
          } else {
            fwrite(&(z[h].npunb),sizeof(int),1,outz);
          }
        }
        if (ok) h++;
        else z[h].npunb = -1; /* h!++ because we have to redo it */
      }
    }
    fclose(outztmp);
    halready = h;
    /* Was printed as halready-1, under-reporting the count by one. */
    printf("%d haloes read from file.\n",halready);
  }

  sorter = (realT *)malloc(maxnptot*sizeof(realT));
  if (NGRID > 0) {
    /* These hold POINTERS: the previous 3*sizeof(realT) / 3*sizeof(int)
       under-allocated on platforms where pointers are wider. */
    gc = (realT **)malloc(3*sizeof(realT *));
    gm = (int **)malloc(3*sizeof(int *));
    DL {
      hr[d] = (realT *)malloc(maxnptot*sizeof(realT));
      gm[d] = (int *)malloc(maxnptot*sizeof(int));
      gc[d] = (realT *)malloc((NGRID+1)*sizeof(realT));
    }
    gn = (int ***)malloc(NGRID*sizeof(int **));
    for (i=0; i<NGRID; i++) {
      gn[i] = (int **)malloc(NGRID*sizeof(int *));
      for (j=0; j<NGRID; j++)
        gn[i][j] = (int *)malloc(NGRID*sizeof(int));
    }
  }

  /* --- Unbinding loop --- */
  for (h=halready; h<nhaloes; h++) {
    if (z[h].nptot > PRNTHR) {
      printf("Halo %d: %d ->",h,z[h].nptot); fflush(stdout);
    }
    /* Gather all member particles (central zone first, then attached
       zones) into hp[] (IDs) and hr[][] (positions). */
    j = 0;
    for (h2 = 0; h2 < z[h].nz; h2++) {
      h3 = z[h].z[h2];
      for (i = 0; i<z[h3].np; i++) {
        if ((h2 > 0) && (z[h3].p[i] == z[h].core))
          printf("Core:%d,%d\n",z[h].np,j);
        hp[j] = z[h3].p[i];
        DL hr[d][j] = p[hp[j]][d];
        j++;
      }
    }
    for (i=0; i<z[h].nptot; i++) {
      unb[i] = 0;
      /* Periodic Boundary Conditions -- comment out if no PBC's */
      DL {
        if ((hr[d][i] - p[z[h].core][d]) > boxsize2) hr[d][i] -= boxsize;
        if ((hr[d][i] - p[z[h].core][d]) < -boxsize2) hr[d][i] += boxsize;
      }
      /* End of PBC handling */
    }
    if (j != z[h].nptot) printf("j = %d, znt = %d!\n",j,z[h].nptot);
    z[h].npunb = z[h].nptot;

    if (z[h].nptot == 1) { /* Singleton halo */
      z[h].npunb = 0;
    } else if (z[h].nptot < NPTOTOL) { /* If it's small enough to be
                                          treated normally */
      numunbound = 1;
      /* 1 by 1 unbinding:*/
      /*while ((numunbound > 0) && (z[h].npunb > 1)) {
        potconst = potconstreal;
        maxboundness = 0.; unbindthis = -1;*/
      /* start lowthresh unbinding */
      niter = 0;
      multiplier = 2.;
      while (((numunbound > 0) || (multiplier > 1.)) && (z[h].npunb > 1)) {
        niter++;
        /* Lower the potential multiplier toward 1 each iteration. */
        if (multiplier > potfact) multiplier /= potfact;
        else multiplier = 1.;
        potconst = potconstreal*multiplier;
        /* end lowthresh unbinding */
        numunbound = 0;
        /* Calculate velocity centroids */
        j = 0;
        for (i=0; i<z[h].np; i++)
          if (unb[i] < 2) j++;
        if (j > 0) {
          DL z[h].v[d] = 0.;
          for (i=0; i<z[h].np; i++) /* only include the core zone (np instead of nptot) */
            if (unb[i] < 2) {/* if not already unbound */
              DL z[h].v[d] += v[d][hp[i]];
            }
          if (j > 0) DL z[h].v[d] /= (realT)j;
        } /* If no particles in the original zone are bound,
             use z[h].v[] from the last iteration */
#pragma omp parallel for default(none) shared(z,v,hp,unb,p,hr,potconst,potconstreal,hubble,h,maxboundness,unbindthis,koverp,niter,potarr,bndarr) private (p1,pot,boundness,d,i,j,ke)
        for (i=0; i< z[h].nptot; i++) {
          p1 = hp[i];
          if (unb[i] < 2) {
            /* Kinetic energy in the halo frame, including Hubble flow. */
            ke = 0.;
            DL ke += sq(hubble*(p[p1][d]-p[z[h].core][d]) +
                        (v[d][p1]-z[h].v[d]));
            ke *= 0.5;
            pot = 0.;
            for (j=0; j< z[h].nptot; j++)
              if (unb[j] < 2)
                if (i != j) {
                  pot += 1./sqrt(sq(hr[0][i]-hr[0][j]) + sq(hr[1][i]-hr[1][j])
                                 + sq(hr[2][i]-hr[2][j]));
                  /* This is where the potential is calculated */
                }
            boundness = ke - potconst * pot;
            /*if (boundness > maxboundness) {
              unbindthis = i;
              maxboundness = boundness;
            }*//* 1 by 1 unbinding */
            koverp[i] = ke / (potconstreal*pot);
            if ((boundness > 0.) && (niter > 1)) {
              unb[i] = 1;
            } /* lowthresh unbinding */
          }
          if (unb[i] == 0) {
            potarr[i] = -pot;
            bndarr[i] = boundness;
          }
          else {
            potarr[i] = 0.;
            bndarr[i] = 0.;
          }
        }
        koverpmax = 1.;
        for (i=0;i<z[h].nptot; i++)
          if ((unb[i] < 2) && (koverp[i] > koverpmax))
            koverpmax = koverp[i];
        if (niter == 1) {
          multiplier = koverpmax;
          if (z[h].nptot > PRNTHR) printf("m:%g ",multiplier);
        } else {
          for (i=0;i<z[h].nptot;i++)
            if (unb[i] == 1) {
              numunbound++;
              unb[i] = 2;
            }
          if ((numunbound == 0) && (multiplier > potfact)) {
            /*printf("%g %g\n",koverpmax,multiplier);fflush(stdout);*/
            multiplier = koverpmax;
          }
          /* lowthresh unbinding */
          /*if (unbindthis > -1) {
            numunbound = 1;
            unb[unbindthis] = 2;
          }*/ /* 1 by 1 unbinding -- remove niter if statement, too */
          z[h].npunb -= numunbound;
          if (z[h].nptot > PRNTHR) { /* see how many were unbound */
            printf(" %d",z[h].npunb); fflush(stdout);
          }
        }
      }
    } else {
      /* Order particles in x,y,z to find gridpoints */
      printf("G");fflush(stdout);
      DL {
#pragma omp parallel for default(none) shared(sorter,hr,z,h,d) private (i)
        for (i=0;i<z[h].nptot;i++)
          sorter[i] = hr[d][i];
        /* Element size was hardcoded 4.  NOTE(review): compar compares the
           leading int bit pattern, which orders IEEE floats correctly only
           when realT is a 4-byte float and values are non-negative --
           confirm realT's definition in voz.h. */
        qsort(sorter, z[h].nptot, sizeof(realT), &compar);
        skippy = floor((realT)z[h].nptot/NGRID);
        /* i+1 bound prevents reading sorter[nptot]; the slot that loop pass
           would have written is overwritten by gc[d][NGRID] below. */
#pragma omp parallel for default(none) shared(sorter,gc,z,h,d,skippy) private (i)
        for (i=skippy; i + 1 < z[h].nptot; i += skippy)
          gc[d][i/skippy] = 0.5*(sorter[i]+sorter[i+1]);
        gc[d][0] = sorter[0];
        gc[d][NGRID] = sorter[z[h].nptot - 1];
      }
      /* Place each particle, count particles in each grid point */
#pragma omp parallel for default(none) shared(gn) private (g0,g1,g2)
      for (g0=0;g0<NGRID;g0++)
        for (g1=0;g1<NGRID;g1++)
          for (g2=0;g2<NGRID;g2++)
            gn[g0][g1][g2] = 0;
#pragma omp parallel for default(none) shared(z,h,gm,hr,gc) private (i,d,j)
      for (i=0; i<z[h].nptot; i++)
        DL {
          /* Bound check first: the old order read gc[d][j] before testing
             j <= NGRID, allowing a one-past-the-end read. */
          for (j=1; (j <= NGRID) && (hr[d][i] > gc[d][j]); j++);
          if (j > NGRID) j--;
          gm[d][i] = j-1;
        }
      for (i=0; i<z[h].nptot; i++) {
        gn [gm[0][i]] [gm[1][i]] [gm[2][i]] ++;
      }
      /* Start unbinding */
      numunbound = 1;
      /* 1 by 1 unbinding:*/
      /*while ((numunbound > 0) && (z[h].npunb > 1)) {
        potconst = potconstreal;
        maxboundness = 0.; unbindthis = -1;*/
      /* start lowthresh unbinding */
      niter = 0;
      multiplier = 2.;
      while (((numunbound > 0) || (multiplier > 1.)) && (z[h].npunb > 1)) {
        niter++;
        if (multiplier > potfact) multiplier /= potfact;
        else multiplier = 1.;
        potconst = potconstreal*multiplier;
        /* end lowthresh unbinding */
        numunbound = 0;
        /* Calculate velocity centroids */
        j = 0;
        for (i=0; i<z[h].np; i++)
          if (unb[i] < 2) j++;
        if (j > 0) {
          DL z[h].v[d] = 0.;
          for (i=0; i<z[h].np; i++) /* only include the core zone (np instead of nptot) */
            if (unb[i] < 2) {/* if not already unbound */
              DL z[h].v[d] += v[d][hp[i]];
            }
          if (j > 0) DL z[h].v[d] /= (realT)j;
        } /* If no particles in the original zone are bound,
             use z[h].v[] from the last iteration */
#pragma omp parallel for default(none) shared(unb,hp,hubble,p,v,z,h,gm,hr,gc,gn,potconst,potarr,bndarr,unbindthis,koverp,potconstreal,niter) private (i,p1,gcrit,g0,g1,g2,pot,boundness,boundnesshigh,boundnesslow,xg,yg,zg,j,d,ke)
        for (i=0; i< z[h].nptot; i++) {
          p1 = hp[i];
          if (unb[i] < 2) {
            ke = 0.;
            DL {
              ke += sq(hubble*(p[p1][d]-p[z[h].core][d])
                       + (v[d][p1]-z[h].v[d]));
              /* Which side of its cell's midpoint the particle sits on. */
              gcrit[d] = gm[d][i] +
                (hr[d][i] > 0.5*(gc[d][gm[d][i]]+gc[d][gm[d][i]+1]));
            }
            ke *= 0.5;
            pot = 0.;
            /* First see if it's bound, using a shallower, easier potential */
            for (g0=0; g0<NGRID; g0++) {
              xg = gc[0][g0 + (g0 >= gcrit[0])];
              for (g1=0; g1<NGRID; g1++) {
                yg = gc[1][g1 + (g1 >= gcrit[1])];
                for (g2=0; g2<NGRID; g2++) {
                  zg = gc[2][g2 + (g2 >= gcrit[2])]; /* Could be speeded up*/
                  pot += (realT)(gn[g0][g1][g2])/sqrt(sq(hr[0][i] - xg) +
                                                      sq(hr[1][i] - yg) + sq(hr[2][i] - zg));
                }
              }
            }
            /* Take out the self-pair */
            pot -= 1./sqrt(sq(hr[0][i] - gc[0][gm[0][i]+(gm[0][i]>=gcrit[0])]) +
                           sq(hr[1][i] - gc[1][gm[1][i]+(gm[1][i]>=gcrit[1])]) +
                           sq(hr[2][i] - gc[2][gm[2][i]+(gm[2][i]>=gcrit[2])]));
            boundness = ke - potconst * pot;
            boundnesslow = boundness;
            if (boundness > 0.) {
              /* Not bound by this criterion; try a potential deeper than
                 the true one */
              pot = 0.;
              for (g0=0; g0<NGRID; g0++) {
                xg = (g0 == gm[0][i]) ? hr[0][i] : gc[0][g0 + (g0<gcrit[0])];
                for (g1=0; g1<NGRID; g1++) {
                  yg = (g1 == gm[1][i]) ? hr[1][i] : gc[1][g1 + (g1<gcrit[1])];
                  for (g2=0; g2<NGRID; g2++) {
                    zg = (g2 == gm[2][i]) ? hr[2][i] : gc[2][g2 + (g2<gcrit[2])];
                    if ((g0 != gm[0][i]) || (g1 != gm[1][i]) || (g2 != gm[2][i])) {
                      /* Unless we're in the same cell */
                      pot += (realT)gn[g0][g1][g2] /
                        sqrt(sq(hr[0][i] - xg) + sq(hr[1][i] - yg) +
                             sq(hr[2][i] - zg));
                    }
                  }
                }
              }
              /* Now do the ones in the same cell by brute force */
              for (j=0; j< z[h].nptot; j++)
                if (gm[0][i] == gm[0][j])
                  if (gm[1][i] == gm[1][j])
                    if (gm[2][i] == gm[2][j])
                      if (unb[j] < 2)
                        if (i != j)
                          pot += 1./sqrt(sq(hr[0][i]-hr[0][j]) + sq(hr[1][i]-hr[1][j])
                                         + sq(hr[2][i]-hr[2][j]));
              boundness = ke - potconst * pot;
              boundnesshigh = boundness;
              if (boundness < 0.) {
                /* The true boundness is too close to zero to use our
                   easy estimates; we need the brute-force potential */
                pot = 0.;
                for (j=0; j< z[h].nptot; j++)
                  if (unb[j] < 2)
                    if (i != j)
                      pot += 1./sqrt(sq(hr[0][i]-hr[0][j]) + sq(hr[1][i]-hr[1][j])
                                     + sq(hr[2][i]-hr[2][j]));
                boundness = ke - potconst * pot;
              }
            }
            /*if (boundness > maxboundness) {
              unbindthis = i;
              maxboundness = boundness;
            }*/ /* 1 by 1 unbinding */
            koverp[i] = ke / (potconstreal*pot);
            if ((boundness > 0.) && (niter > 1)) {
              unb[i] = 1;
            } /* lowthresh unbinding */
          }
          if (unb[i] == 0) {
            bndarr[i] = boundness;
            potarr[i] = -pot;
          }
          else {
            bndarr[i] = 0.;
            potarr[i] = 0.;
          }
        }
        koverpmax = 1.;
        for (i=0;i<z[h].nptot; i++)
          if ((unb[i] < 2) && (koverp[i] > koverpmax))
            koverpmax = koverp[i];
        if (niter == 1) {
          multiplier = koverpmax;
          if (z[h].nptot > PRNTHR) printf("m:%g ",multiplier);
        } else {
          for (i=0; i<z[h].nptot; i++)
            if (unb[i] == 1) {
              numunbound++;
              unb[i] = 2;
              gn[gm[0][i]][gm[1][i]][gm[2][i]] --;
            }
          if ((numunbound == 0) && (multiplier > potfact))
            multiplier = koverpmax;
          /* lowthresh unbinding */
          /*if (unbindthis > -1) {
            numunbound = 1;
            unb[unbindthis] = 2;
            gn[gm[0][unbindthis]][gm[1][unbindthis]][gm[2][unbindthis]] --;
          }*/ /* 1 by 1 unbinding */
          z[h].npunb -= numunbound;
          if (z[h].nptot > PRNTHR) { /* see how many were unbound */
            printf(" %d",z[h].npunb); fflush(stdout);
            /*printf(",%g",maxboundness);*/
          }
        }
      }
    }
    if (z[h].nptot > PRNTHR) printf("\n");
    if (z[h].npunb == 1) z[h].npunb = 0;

    /* Find most bound particle, total boundness, max boundness */
    z[h].potmin = 0.;
    z[h].pottot = 0.;
    z[h].bndmin = 0.;
    z[h].bndtot = 0.;
    for (i = 0; i<z[h].nptot; i++) {
      if (unb[i] == 0) {
        if (potarr[i] < z[h].potmin) {
          z[h].potmin = potarr[i];
          z[h].mpp = hp[i];
          z[h].mppincentral = (i<z[h].np);
          distp = 0.;
          DL {
            distp += sq(p[hp[i]][d]-p[z[h].core][d]);
          }
          distp = sqrt(distp); /* was sqrt(dist): wrong/stale variable */
        }
        z[h].pottot += potarr[i];
        if (bndarr[i] < z[h].bndmin) {
          z[h].bndmin = bndarr[i];
          z[h].mbp = hp[i];
          z[h].mbpincentral = (i<z[h].np);
          dist = 0.;
          ke = 0.;
          v2 = 0.;
          DL {
            v2 += sq(v[d][hp[i]]-z[h].v[d]);
            ke += sq(hubble*(p[hp[i]][d]-p[z[h].core][d])
                     + (v[d][hp[i]]-z[h].v[d]));
            dist += sq(p[hp[i]][d]-p[z[h].core][d]);
          }
          ke *= 0.5;
          dist = sqrt(dist);
        }
        z[h].bndtot += bndarr[i];
      }
    }
    /*if (z[h].nptot > PRNTHR) {
      printf("m?p: b:%1d,%f %1d,%f\n",
      (int)z[h].mbpincentral, distp,(int)z[h].mppincentral,dist);
    }*/

    /* Output the bound particles */
    fwrite(&(z[h].npunb),sizeof(int),1,outz);
    if (z[h].npunb > 0)
      for (i=0; i<z[h].nptot; i++) {
        if (unb[i] < 2) {
          fwrite(&hp[i],sizeof(int),1,outz);
        }
      }
    fflush(outz);
  }
  fclose(outz);
  printf("Done! Outputting ...\n");fflush(stdout);

  nhunb = 0;
  for (i=0; i<nhaloes; i++)
    if (z[i].npunb > 0)
      nhunb++;
  tot2unb = (int *)malloc(nhunb*sizeof(int));

  /* Sort the haloes by bound mass, then pre-unbound mass, then prob */
  maxnptot++;
  h = 0;
  sortermass = (realT *)malloc(nhunb*sizeof(realT));
  for (i=0; i<nhaloes; i++) {
    if (z[i].npunb > 0) {
      tot2unb[h] = i;
      /* Composite key: integer part = bound count; fractional part breaks
         ties by nptot, then (nested) by prob. */
      sortermass[h] = (realT)z[i].npunb +
        log((e1*((realT)z[i].nptot + log((e1*(realT)z[i].prob+(realT)maxprob)/(realT)maxprob)) +
             (realT)maxnptot)/(realT)maxnptot);
      /*printf("%f, %d, %d, %e\n",sortermass[h],z[i].npunb,z[i].nptot,z[i].prob);*/
      h++;
    }
  }
  iord = (int *)malloc(nhunb*sizeof(int));
  findrtop(sortermass, nhunb, iord, nhunb);
  outlist = fopen(outlistfile, "w");
  printf("Nhunb = %d\n",nhunb);
  fprintf(outlist, "%d\t%d\n",nhaloes,nhunb);
  for (i=0; i<nhunb; i++) {
    h = tot2unb[iord[i]];
    if (z[h].npunb > 0) {
      c = z[h].core;
      /* %d Halo number,
         %d # bound particles,
         %d # total particles,
         %e peak-to-"strongest link" density ratio,
         %e volume of peak's cell (inverse of its density),
         %e sum of boundnesses for all particles,
         %e deepest boundness,
         (Could also return deepest value of potential, sum of all potentials)
         %d "core" (peak particle ID)
         %d most bound particle ID
         %d particle ID at potential minimum
         %d is most bound particle in central zone? (1 if yes, 0 if no)
         %d is deepest-potential particle in central zone? (1 if yes, 0 if no)
         (if one of these is zero, COULD indicate weird/spurious/duplicate halo)
         %f%f%f x,y,z coords of central zone (in units s.t. 1 is the boxsize)
         %e%e%e vx,vy,vy velocity centroid in km/sec */
      fprintf(outlist,
              "%d \t%d \t%d \t%e %e %e %e \t%d\t%d\t%d\t%1d %1d %8.6f %8.6f %8.6f %e %e %e\n",
              h, z[h].npunb, z[h].nptot,z[h].prob,z[h].corevol,
              z[h].bndtot,z[h].bndmin, c, z[h].mbp, z[h].mpp,
              (int)z[h].mbpincentral, (int)z[h].mppincentral,
              p[c][0]/boxsize/cell, p[c][1]/boxsize/cell, p[c][2]/boxsize/cell,
              z[h].v[0],z[h].v[1],z[h].v[2]);
    }
  }
  fclose(outlist);
  return(0);
}
|
team.c | /* Copyright (C) 2005-2017 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file handles the maintainence of threads in response to team
creation and termination. */
#include "libgomp.h"
#include "pool.h"
#include "hierarchy.h"
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include "migrate.h"
#ifdef LIBGOMP_USE_PTHREADS
/* This attribute contains PTHREAD_CREATE_DETACHED. */
pthread_attr_t gomp_thread_attr;
/* This key is for the thread destructor. */
pthread_key_t gomp_thread_destructor;
/* This is the libgomp per-thread data structure. */
#if defined HAVE_TLS || defined USE_EMUTLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
/* Whether or not we're doing Popcorn a profiling run. Activates all associated
machinery. */
bool popcorn_profiling = false;
const char *popcorn_prof_fn = "popcorn-profile.txt";
FILE *popcorn_prof_fp = NULL;
/* This structure is used to communicate across pthread_create.  It carries
   everything gomp_thread_start needs to initialize the new thread's
   gomp_thread state.  */
struct gomp_thread_start_data
{
  void (*fn) (void *);			/* Function the thread will run.  */
  void *fn_data;			/* Argument passed to fn.  */
  struct gomp_team_state ts;		/* Team state copied into the thread.  */
  struct gomp_task *task;		/* Implicit task for the thread.  */
  struct gomp_thread_pool *thread_pool;	/* Pool the thread belongs to.  */
  int popcorn_created_tid;		/* Creation-order id, logged when
					   popcorn_profiling is enabled.  */
  int popcorn_nid;			/* Popcorn node to migrate to;
					   nonzero triggers migrate().  */
  unsigned int place;			/* Copied to thr->place.  */
  bool nested;				/* True for a nested team: thread
					   runs fn once and exits rather
					   than entering the idle loop.  */
};
/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */
static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  struct gomp_thread_pool *pool;
  void (*local_fn) (void *);
  void *local_data;

  /* Locate this thread's gomp_thread: TLS when available, otherwise a
     heap-allocated struct hung off a pthread key.  NOTE(review): this
     malloc is unchecked.  */
#if defined HAVE_TLS || defined USE_EMUTLS
  thr = &gomp_tls_data;
#else
  thr = malloc(sizeof(struct gomp_thread));
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->thread_pool = data->thread_pool;
  thr->ts = data->ts;
  thr->task = data->task;
  thr->place = data->place;
  thr->popcorn_created_tid = data->popcorn_created_tid;
  thr->popcorn_nid = data->popcorn_nid;

  /* Publish our release semaphore so the team can signal us for ordered
     sections.  */
  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  /* Profiling run: record kernel tid -> creation-order tid mapping.  */
  if (popcorn_profiling)
    fprintf(popcorn_prof_fp, "%d %d\n", gettid(), thr->popcorn_created_tid);

  /* Make thread pool local. */
  pool = thr->thread_pool;

  /* Distributed execution: move to the node this thread was assigned.  */
  if (popcorn_global.distributed && thr->popcorn_nid)
    migrate (thr->popcorn_nid, NULL, NULL);

  if (data->nested)
    {
      /* Nested team: run the team function exactly once, then tear down.  */
      struct gomp_team *team = thr->ts.team;
      struct gomp_task *task = thr->task;

      gomp_barrier_wait (&team->barrier);
      local_fn (local_data);
      gomp_team_barrier_wait_final (&team->barrier);
      gomp_finish_task (task);
      gomp_barrier_wait_last (&team->barrier);
    }
  else
    {
      /* Pool thread: register in the pool, then loop serving team
	 invocations until woken with a NULL fn.  */
      pool->threads[thr->ts.team_id] = thr;

      gomp_simple_barrier_wait (&pool->threads_dock);
      do
	{
	  struct gomp_team *team = thr->ts.team;
	  struct gomp_task *task = thr->task;

	  local_fn (local_data);
	  /* End-of-parallel barrier: hierarchical hybrid variant when
	     enabled, otherwise the standard team barrier.  */
	  if (popcorn_global.hybrid_barrier)
	    hierarchy_hybrid_barrier_final (thr->popcorn_nid, "End parallel");
	  else
	    gomp_team_barrier_wait_final (&team->barrier);
	  gomp_finish_task (task);

	  /* Wait at the dock for the next team; the releaser stores our
	     next fn/data into thr before waking us.  */
	  gomp_simple_barrier_wait (&pool->threads_dock);

	  local_fn = thr->fn;
	  local_data = thr->data;
	  thr->fn = NULL;
	}
      while (local_fn);
    }

  /* Migrate back to origin just in case application migrated us elsewhere */
  if (popcorn_global.distributed && current_nid() > 0)
    migrate (0, NULL, NULL);

  /* If distributed, wait for everybody to get back to origin before exiting */
  if (popcorn_global.finished)
    gomp_simple_barrier_wait (&pool->threads_dock);

  gomp_sem_destroy (&thr->release);
  thr->thread_pool = NULL;
  thr->task = NULL;
#if !defined HAVE_TLS && !defined USE_EMUTLS
  free(thr);
#endif
  return NULL;
}
#endif
/* Return the cached team from the thread pool if it was built for exactly
   NTHREADS threads, detaching it from the pool; otherwise return NULL.
   Only applies outside of any existing team (orphaned region).  */
static inline struct gomp_team *
get_last_team (unsigned nthreads)
{
  struct gomp_thread *thr = gomp_thread ();

  /* Inside an existing team there is nothing to reuse.  */
  if (thr->ts.team != NULL)
    return NULL;

  struct gomp_thread_pool *pool = gomp_get_thread_pool (thr, nthreads);
  struct gomp_team *cached = pool->last_team;
  if (cached == NULL || cached->nthreads != nthreads)
    return NULL;

  /* Hand ownership of the cached team to the caller.  */
  pool->last_team = NULL;
  return cached;
}
/* Create a new team data structure.  Reuses the pool's cached last team
   when it was sized for the same thread count; otherwise allocates one
   block holding the team plus, per thread, one implicit_task slot and one
   ordered_release pointer.  Either way, all per-use fields (work shares,
   task queue, counters) are (re)initialized.  */
struct gomp_team *
gomp_new_team (unsigned nthreads)
{
  struct gomp_team *team;
  int i;

  team = get_last_team (nthreads);
  if (team == NULL)
    {
      /* Trailing storage: nthreads entries of each trailing array.  */
      size_t extra = sizeof (team->ordered_release[0])
		     + sizeof (team->implicit_task[0]);
      team = gomp_malloc (sizeof (*team) + nthreads * extra);

#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_init (&team->work_share_list_free_lock);
#endif
      gomp_barrier_init (&team->barrier, nthreads);
      gomp_mutex_init (&team->task_lock);
      team->nthreads = nthreads;
    }

  team->work_share_chunk = 8;
#ifdef HAVE_SYNC_BUILTINS
  team->single_count = 0;
#endif
  team->work_shares_to_free = &team->work_shares[0];
  /* work_shares[0] is used immediately; slots 1..7 are chained into the
     initial allocation free list.  */
  gomp_init_work_share (&team->work_shares[0], false, nthreads);
  team->work_shares[0].next_alloc = NULL;
  team->work_share_list_free = NULL;
  team->work_share_list_alloc = &team->work_shares[1];
  for (i = 1; i < 7; i++)
    team->work_shares[i].next_free = &team->work_shares[i + 1];
  team->work_shares[i].next_free = NULL;

  gomp_sem_init (&team->master_release, 0);
  /* ordered_release lives immediately past the implicit_task array; slot 0
     is the master thread's release semaphore.  */
  team->ordered_release = (void *) &team->implicit_task[nthreads];
  team->ordered_release[0] = &team->master_release;

  priority_queue_init (&team->task_queue);
  team->task_count = 0;
  team->task_queued_count = 0;
  team->task_running_count = 0;
  team->work_share_cancelled = 0;
  team->team_cancelled = 0;
  return team;
}
/* Free a team data structure.  Destroys the synchronization objects
   embedded in the team (barrier, task lock, task queue and, without
   sync builtins, the work-share free-list lock) before releasing the
   single allocation made in gomp_new_team.  */
static void
free_team (struct gomp_team *team)
{
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_destroy (&team->work_share_list_free_lock);
#endif
gomp_barrier_destroy (&team->barrier);
gomp_mutex_destroy (&team->task_lock);
priority_queue_free (&team->task_queue);
free (team);
}
/* Run in each pool thread that is being shut down (installed as the
   thread's fn by gomp_free_thread).  Signals the last-waiter side of the
   dock barrier so the master can proceed, tears down per-thread state,
   then terminates the thread.  */
static void
gomp_free_pool_helper (void *thread_pool)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool
= (struct gomp_thread_pool *) thread_pool;
/* Counterpart of the second gomp_simple_barrier_wait in
   gomp_free_thread.  */
gomp_simple_barrier_wait_last (&pool->threads_dock);
gomp_sem_destroy (&thr->release);
thr->thread_pool = NULL;
thr->task = NULL;
#ifdef LIBGOMP_USE_PTHREADS
pthread_exit (NULL);
#elif defined(__nvptx__)
asm ("exit;");
#else
#error gomp_free_pool_helper must terminate the thread
#endif
}
/*
 * Because Popcorn doesn't currently support threads waiting on remote nodes
 * while threads exit on the origin, release all threads waiting at the dock
 * upon shutdown.
 *
 * Runs as a library destructor.  The two barrier waits below pair with
 * the two waits at the tail of the worker loop (which re-checks
 * popcorn_global.finished before exiting) -- the first releases workers
 * from the dock with fn == NULL, the second waits until they have
 * migrated back to the origin.
 */
static void __attribute__((destructor))
gomp_release_pool_threads_final ()
{
unsigned i;
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool = thr->thread_pool;
if (popcorn_global.distributed)
{
/* Migrate back to origin just in case application migrated us
elsewhere */
if (current_nid() > 0)
migrate (0, NULL, NULL);
/* NOTE(review): bar.total > 0 is used as "the dock barrier was
   initialized"; confirm this holds for pools that never started a
   team.  */
if (pool && pool->threads_dock.bar.total)
{
/* Signal not to run any more functions & end-of-application
cleanup */
for (i = 0; i < pool->threads_used; i++)
pool->threads[i]->fn = NULL;
popcorn_global.finished = true;
/* Break threads out of execution loop */
gomp_simple_barrier_wait (&pool->threads_dock);
/* Wait for everybody to migrate back */
gomp_simple_barrier_wait (&pool->threads_dock);
}
}
}
/* Free a thread pool and release its threads.  Registered as the
   pthread_key destructor for gomp_thread_destructor (see
   initialize_team), and also called directly.  Installs
   gomp_free_pool_helper as every pool thread's next function, releases
   the dock twice (release + wait-for-exit), then frees the pool, ends
   any dangling team and frees the implicit task.  */
void
gomp_free_thread (void *arg __attribute__((unused)))
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_thread_pool *pool = thr->thread_pool;
if (pool)
{
if (pool->threads_used > 0)
{
int i;
/* Thread 0 is the calling (master) thread; only helpers 1..n-1
   get the shutdown function.  */
for (i = 1; i < pool->threads_used; i++)
{
struct gomp_thread *nthr = pool->threads[i];
nthr->fn = gomp_free_pool_helper;
nthr->data = pool;
}
/* This barrier undocks threads docked on pool->threads_dock. */
gomp_simple_barrier_wait (&pool->threads_dock);
/* And this waits till all threads have called gomp_barrier_wait_last
in gomp_free_pool_helper. */
gomp_simple_barrier_wait (&pool->threads_dock);
/* Now it is safe to destroy the barrier and free the pool. */
gomp_simple_barrier_destroy (&pool->threads_dock);
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads,
1L - pool->threads_used);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads -= pool->threads_used - 1L;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
}
if (pool->last_team)
free_team (pool->last_team);
#ifndef __nvptx__
free (pool->threads);
free (pool);
#endif
thr->thread_pool = NULL;
}
if (thr->ts.level == 0 && __builtin_expect (thr->ts.team != NULL, 0))
gomp_team_end ();
if (thr->task != NULL)
{
/* Free the implicit task allocated by gomp_new_icv.  */
struct gomp_task *task = thr->task;
gomp_end_task ();
free (task);
}
}
/* Keep a counter of all threads launched.  Incremented (atomically when
   sync builtins are available, otherwise under popcorn_tid_lock for
   nested teams) when stamping start_data->popcorn_created_tid in
   gomp_team_start.  Starts at 1: the initial thread implicitly owns
   id 0.  NOTE(review): popcorn_tid_lock is never gomp_mutex_init'd in
   this file -- presumably zero-initialization suffices; confirm.  */
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_t popcorn_tid_lock;
#endif
static int popcorn_created_tid = 1;
/* Launch a team. */
#ifdef LIBGOMP_USE_PTHREADS
/* Start TEAM executing FN (DATA) with NTHREADS threads: reuse idle
   pool threads where possible, spawn the remainder with pthread_create,
   then release everybody through the dock barrier.  Popcorn additions
   relative to upstream libgomp: per-node thread assignment
   (hierarchy_assign_node), hierarchical barrier re-initialization
   before release, per-thread creation ids, and allocas converted to
   malloc/free.  Only callable while holding no team barrier.  */
void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
		 unsigned flags, struct gomp_team *team)
{
  struct gomp_thread_start_data *start_data, *first_start = NULL;
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  bool nested;
  struct gomp_thread_pool *pool;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;
  unsigned long nthreads_var;
  char bind, bind_var;
  unsigned int s = 0, rest = 0, p = 0, k = 0;
  unsigned int affinity_count = 0;
  struct gomp_thread **affinity_thr = NULL;
  unsigned int nodes, nid;
  bool popcorn_place;

  thr = gomp_thread ();
  /* Nested iff we already sit at some team level.  */
  nested = thr->ts.level;
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;
  if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0)
    gomp_init_affinity ();
  /* Popcorn node placement applies only to top-level teams.  */
  popcorn_place = popcorn_global.distributed && !nested;

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct. */
  team->prev_ts = thr->ts;
  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
  thr->ts.single_count = 0;
#endif
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  /* Resolve ICVs (nthreads / proc-bind) for this nesting level.  */
  nthreads_var = icv->nthreads_var;
  if (__builtin_expect (gomp_nthreads_var_list != NULL, 0)
      && thr->ts.level < gomp_nthreads_var_list_len)
    nthreads_var = gomp_nthreads_var_list[thr->ts.level];
  bind_var = icv->bind_var;
  if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false)
    bind_var = flags & 7;
  bind = bind_var;
  if (__builtin_expect (gomp_bind_var_list != NULL, 0)
      && thr->ts.level < gomp_bind_var_list_len)
    bind_var = gomp_bind_var_list[thr->ts.level];
  gomp_init_task (thr->task, task, icv);
  team->implicit_task[0].icv.nthreads_var = nthreads_var;
  team->implicit_task[0].icv.bind_var = bind_var;

  /* Re-initialize hierarchical data structures for Popcorn every time to avoid
     racing child threads for updated per-node thread counts. */
  if (popcorn_place)
    {
      for (nid = 0; nid < MAX_POPCORN_NODES; nid++)
	{
	  popcorn_node[nid].sync.num = 0;
	  popcorn_node[nid].opt.num = 0;
	}
      thr->popcorn_nid = hierarchy_assign_node(0);
    }

  if (nthreads == 1)
    {
      /* Degenerate single-thread team: nothing to release or spawn.  */
      if (popcorn_place)
	{
	  hierarchy_init_global (1);
	  hierarchy_init_node (thr->popcorn_nid);
	}
      return;
    }

  i = 1;

  if (__builtin_expect (gomp_places_list != NULL, 0))
    {
      /* Depending on chosen proc_bind model, set subpartition
	 for the master thread and initialize helper variables
	 P and optionally S, K and/or REST used by later place
	 computation for each additional thread. */
      p = thr->place - 1;
      switch (bind)
	{
	case omp_proc_bind_true:
	case omp_proc_bind_close:
	  if (nthreads > thr->ts.place_partition_len)
	    {
	      /* T > P. S threads will be placed in each place,
		 and the final REM threads placed one by one
		 into the already occupied places. */
	      s = nthreads / thr->ts.place_partition_len;
	      rest = nthreads % thr->ts.place_partition_len;
	    }
	  else
	    s = 1;
	  k = 1;
	  break;
	case omp_proc_bind_master:
	  /* Each thread will be bound to master's place. */
	  break;
	case omp_proc_bind_spread:
	  if (nthreads <= thr->ts.place_partition_len)
	    {
	      /* T <= P. Each subpartition will have in between s
		 and s+1 places (subpartitions starting at or
		 after rest will have s places, earlier s+1 places),
		 each thread will be bound to the first place in
		 its subpartition (except for the master thread
		 that can be bound to another place in its
		 subpartition). */
	      s = thr->ts.place_partition_len / nthreads;
	      rest = thr->ts.place_partition_len % nthreads;
	      rest = (s + 1) * rest + thr->ts.place_partition_off;
	      if (p < rest)
		{
		  p -= (p - thr->ts.place_partition_off) % (s + 1);
		  thr->ts.place_partition_len = s + 1;
		}
	      else
		{
		  p -= (p - rest) % s;
		  thr->ts.place_partition_len = s;
		}
	      thr->ts.place_partition_off = p;
	    }
	  else
	    {
	      /* T > P. Each subpartition will have just a single
		 place and we'll place between s and s+1
		 threads into each subpartition. */
	      s = nthreads / thr->ts.place_partition_len;
	      rest = nthreads % thr->ts.place_partition_len;
	      thr->ts.place_partition_off = p;
	      thr->ts.place_partition_len = 1;
	      k = 1;
	    }
	  break;
	}
    }
  else
    bind = omp_proc_bind_false;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions. This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things. Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads. */
  if (!nested)
    {
      old_threads_used = pool->threads_used;
      if (nthreads <= old_threads_used)
	n = nthreads;
      else if (old_threads_used == 0)
	{
	  n = 0;
	  gomp_simple_barrier_init (&pool->threads_dock, nthreads);
	}
      else
	{
	  n = old_threads_used;
	  /* Increase the barrier threshold to make sure all new
	     threads arrive before the team is released. */
	  gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
	}
      /* Not true yet, but soon will be. We're going to release all
	 threads from the dock, and those that aren't part of the
	 team will exit. */
      pool->threads_used = nthreads;
      /* If necessary, expand the size of the gomp_threads array. It is
	 expected that changes in the number of threads are rare, thus we
	 make no effort to expand gomp_threads_size geometrically. */
      if (nthreads >= pool->threads_size)
	{
	  pool->threads_size = nthreads + 1;
	  /* NOTE(review): sizeof (struct gomp_thread_data *) -- all data
	     pointers are the same size so this is harmless, but the
	     element type is struct gomp_thread *; consider fixing for
	     clarity.  */
	  pool->threads
	    = gomp_realloc (pool->threads,
			    pool->threads_size
			    * sizeof (struct gomp_thread_data *));
	}
      /* Release existing idle threads. */
      for (; i < n; ++i)
	{
	  unsigned int place_partition_off = thr->ts.place_partition_off;
	  unsigned int place_partition_len = thr->ts.place_partition_len;
	  unsigned int place = 0;
	  if (__builtin_expect (gomp_places_list != NULL, 0))
	    {
	      /* Compute this thread's place P (and subpartition) per the
		 bind policy; mirrors the switch in the spawn loop below.  */
	      switch (bind)
		{
		case omp_proc_bind_true:
		case omp_proc_bind_close:
		  if (k == s)
		    {
		      ++p;
		      if (p == (team->prev_ts.place_partition_off
				+ team->prev_ts.place_partition_len))
			p = team->prev_ts.place_partition_off;
		      k = 1;
		      if (i == nthreads - rest)
			s = 1;
		    }
		  else
		    ++k;
		  break;
		case omp_proc_bind_master:
		  break;
		case omp_proc_bind_spread:
		  if (k == 0)
		    {
		      /* T <= P. */
		      if (p < rest)
			p += s + 1;
		      else
			p += s;
		      if (p == (team->prev_ts.place_partition_off
				+ team->prev_ts.place_partition_len))
			p = team->prev_ts.place_partition_off;
		      place_partition_off = p;
		      if (p < rest)
			place_partition_len = s + 1;
		      else
			place_partition_len = s;
		    }
		  else
		    {
		      /* T > P. */
		      if (k == s)
			{
			  ++p;
			  if (p == (team->prev_ts.place_partition_off
				    + team->prev_ts.place_partition_len))
			    p = team->prev_ts.place_partition_off;
			  k = 1;
			  if (i == nthreads - rest)
			    s = 1;
			}
		      else
			++k;
		      place_partition_off = p;
		      place_partition_len = 1;
		    }
		  break;
		}
	      /* If the idle thread at slot I is not already on an
		 acceptable place, rebucket old threads by place into
		 AFFINITY_THR and pick a matching one.  */
	      if (affinity_thr != NULL
		  || (bind != omp_proc_bind_true
		      && pool->threads[i]->place != p + 1)
		  || pool->threads[i]->place <= place_partition_off
		  || pool->threads[i]->place > (place_partition_off
						+ place_partition_len))
		{
		  unsigned int l;
		  if (affinity_thr == NULL)
		    {
		      unsigned int j;
		      /* Popcorn: remove allocas */
		      /*if (team->prev_ts.place_partition_len > 64)*/
		      affinity_thr
			= gomp_malloc (team->prev_ts.place_partition_len
				       * sizeof (struct gomp_thread *));
		      /*else
			affinity_thr
			  = gomp_alloca (team->prev_ts.place_partition_len
					 * sizeof (struct gomp_thread *));*/
		      memset (affinity_thr, '\0',
			      team->prev_ts.place_partition_len
			      * sizeof (struct gomp_thread *));
		      /* Chain old threads per place, threading the list
			 through their data pointers.  */
		      for (j = i; j < old_threads_used; j++)
			{
			  if (pool->threads[j]->place
			      > team->prev_ts.place_partition_off
			      && (pool->threads[j]->place
				  <= (team->prev_ts.place_partition_off
				      + team->prev_ts.place_partition_len)))
			    {
			      l = pool->threads[j]->place - 1
				  - team->prev_ts.place_partition_off;
			      pool->threads[j]->data = affinity_thr[l];
			      affinity_thr[l] = pool->threads[j];
			    }
			  pool->threads[j] = NULL;
			}
		      if (nthreads > old_threads_used)
			memset (&pool->threads[old_threads_used],
				'\0', ((nthreads - old_threads_used)
				       * sizeof (struct gomp_thread *)));
		      n = nthreads;
		      affinity_count = old_threads_used - i;
		    }
		  if (affinity_count == 0)
		    break;
		  l = p;
		  if (affinity_thr[l - team->prev_ts.place_partition_off]
		      == NULL)
		    {
		      if (bind != omp_proc_bind_true)
			continue;
		      for (l = place_partition_off;
			   l < place_partition_off + place_partition_len;
			   l++)
			if (affinity_thr[l - team->prev_ts.place_partition_off]
			    != NULL)
			  break;
		      if (l == place_partition_off + place_partition_len)
			continue;
		    }
		  nthr = affinity_thr[l - team->prev_ts.place_partition_off];
		  affinity_thr[l - team->prev_ts.place_partition_off]
		    = (struct gomp_thread *) nthr->data;
		  affinity_count--;
		  pool->threads[i] = nthr;
		}
	      else
		nthr = pool->threads[i];
	      place = p + 1;
	    }
	  else
	    nthr = pool->threads[i];
	  /* Initialize the reused thread's team state and hand it FN.  */
	  nthr->ts.team = team;
	  nthr->ts.work_share = &team->work_shares[0];
	  nthr->ts.last_work_share = NULL;
	  nthr->ts.team_id = i;
	  nthr->ts.level = team->prev_ts.level + 1;
	  nthr->ts.active_level = thr->ts.active_level;
	  nthr->ts.place_partition_off = place_partition_off;
	  nthr->ts.place_partition_len = place_partition_len;
#ifdef HAVE_SYNC_BUILTINS
	  nthr->ts.single_count = 0;
#endif
	  nthr->ts.static_trip = 0;
	  nthr->task = &team->implicit_task[i];
	  nthr->place = place;
	  if (popcorn_place)
	    nthr->popcorn_nid = hierarchy_assign_node(i);
	  gomp_init_task (nthr->task, task, icv);
	  team->implicit_task[i].icv.nthreads_var = nthreads_var;
	  team->implicit_task[i].icv.bind_var = bind_var;
	  nthr->fn = fn;
	  nthr->data = data;
	  team->ordered_release[i] = &nthr->release;
	}

      if (__builtin_expect (affinity_thr != NULL, 0))
	{
	  /* If AFFINITY_THR is non-NULL just because we had to
	     permute some threads in the pool, but we've managed
	     to find exactly as many old threads as we'd find
	     without affinity, we don't need to handle this
	     specially anymore. */
	  if (nthreads <= old_threads_used
	      ? (affinity_count == old_threads_used - nthreads)
	      : (i == old_threads_used))
	    {
	      /* Popcorn: removed alloca, always needs to be freed */
	      /*if (team->prev_ts.place_partition_len > 64)*/
	      free (affinity_thr);
	      affinity_thr = NULL;
	      affinity_count = 0;
	    }
	  else
	    {
	      i = 1;
	      /* We are going to compute the places/subpartitions
		 again from the beginning. So, we need to reinitialize
		 vars modified by the switch (bind) above inside
		 of the loop, to the state they had after the initial
		 switch (bind). */
	      switch (bind)
		{
		case omp_proc_bind_true:
		case omp_proc_bind_close:
		  if (nthreads > thr->ts.place_partition_len)
		    /* T > P. S has been changed, so needs
		       to be recomputed. */
		    s = nthreads / thr->ts.place_partition_len;
		  k = 1;
		  p = thr->place - 1;
		  break;
		case omp_proc_bind_master:
		  /* No vars have been changed. */
		  break;
		case omp_proc_bind_spread:
		  p = thr->ts.place_partition_off;
		  if (k != 0)
		    {
		      /* T > P. */
		      s = nthreads / team->prev_ts.place_partition_len;
		      k = 1;
		    }
		  break;
		}
	      /* Increase the barrier threshold to make sure all new
		 threads and all the threads we're going to let die
		 arrive before the team is released. */
	      if (affinity_count)
		gomp_simple_barrier_reinit (&pool->threads_dock,
					    nthreads + affinity_count);
	    }
	}

      if (i == nthreads)
	goto do_release;
    }

  if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0))
    {
      long diff = (long) (nthreads + affinity_count) - (long) old_threads_used;
      if (old_threads_used == 0)
	--diff;
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_managed_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
    }

  attr = &gomp_thread_attr;
  if (__builtin_expect (gomp_places_list != NULL, 0))
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
	pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

  /* Popcorn: convert alloca to malloc */
  start_data = gomp_malloc (sizeof (struct gomp_thread_start_data)
			    * (nthreads-i));
  first_start = start_data;

  /* Launch new threads. */
  for (; i < nthreads; ++i)
    {
      pthread_t pt;
      int err;
      start_data->ts.place_partition_off = thr->ts.place_partition_off;
      start_data->ts.place_partition_len = thr->ts.place_partition_len;
      start_data->place = 0;
      if (__builtin_expect (gomp_places_list != NULL, 0))
	{
	  /* Same place computation as in the reuse loop above.  */
	  switch (bind)
	    {
	    case omp_proc_bind_true:
	    case omp_proc_bind_close:
	      if (k == s)
		{
		  ++p;
		  if (p == (team->prev_ts.place_partition_off
			    + team->prev_ts.place_partition_len))
		    p = team->prev_ts.place_partition_off;
		  k = 1;
		  if (i == nthreads - rest)
		    s = 1;
		}
	      else
		++k;
	      break;
	    case omp_proc_bind_master:
	      break;
	    case omp_proc_bind_spread:
	      if (k == 0)
		{
		  /* T <= P. */
		  if (p < rest)
		    p += s + 1;
		  else
		    p += s;
		  if (p == (team->prev_ts.place_partition_off
			    + team->prev_ts.place_partition_len))
		    p = team->prev_ts.place_partition_off;
		  start_data->ts.place_partition_off = p;
		  if (p < rest)
		    start_data->ts.place_partition_len = s + 1;
		  else
		    start_data->ts.place_partition_len = s;
		}
	      else
		{
		  /* T > P. */
		  if (k == s)
		    {
		      ++p;
		      if (p == (team->prev_ts.place_partition_off
				+ team->prev_ts.place_partition_len))
			p = team->prev_ts.place_partition_off;
		      k = 1;
		      if (i == nthreads - rest)
			s = 1;
		    }
		  else
		    ++k;
		  start_data->ts.place_partition_off = p;
		  start_data->ts.place_partition_len = 1;
		}
	      break;
	    }
	  start_data->place = p + 1;
	  /* Slot already filled by an affinity-matched reused thread.  */
	  if (affinity_thr != NULL && pool->threads[i] != NULL)
	    continue;
	  gomp_init_thread_affinity (attr, p);
	}
      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->ts.team = team;
      start_data->ts.work_share = &team->work_shares[0];
      start_data->ts.last_work_share = NULL;
      start_data->ts.team_id = i;
      start_data->ts.level = team->prev_ts.level + 1;
      start_data->ts.active_level = thr->ts.active_level;
#ifdef HAVE_SYNC_BUILTINS
      start_data->ts.single_count = 0;
      /* Popcorn: stamp a globally unique thread-creation id.  */
      start_data->popcorn_created_tid =
	  __sync_fetch_and_add(&popcorn_created_tid, 1);
#else
      if(nested) {
	gomp_mutex_lock(&popcorn_tid_lock);
	start_data->popcorn_created_tid = popcorn_created_tid++;
	gomp_mutex_unlock(&popcorn_tid_lock);
      }
      else start_data->popcorn_created_tid = popcorn_created_tid++;
#endif
      start_data->ts.static_trip = 0;
      start_data->task = &team->implicit_task[i];
      if (popcorn_place)
	start_data->popcorn_nid = hierarchy_assign_node(i);
      gomp_init_task (start_data->task, task, icv);
      team->implicit_task[i].icv.nthreads_var = nthreads_var;
      team->implicit_task[i].icv.bind_var = bind_var;
      start_data->thread_pool = pool;
      start_data->nested = nested;
      attr = gomp_adjust_thread_attr (attr, &thread_attr);
      err = pthread_create (&pt, attr, gomp_thread_start, start_data++);
      if (err != 0)
	gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (__builtin_expect (attr == &thread_attr, 0))
    pthread_attr_destroy (&thread_attr);

do_release:
  /* Re-initialize per-node & global data structures before releasing threads */
  if (popcorn_place)
    {
      nodes = 0;
      for (nid = 0; nid < MAX_POPCORN_NODES; nid++)
	{
	  if (popcorn_node[nid].sync.num)
	    {
	      hierarchy_init_node(nid);
	      nodes++;
	    }
	}
      hierarchy_init_global(nodes);
    }
  if (nested)
    gomp_barrier_wait (&team->barrier);
  else
    gomp_simple_barrier_wait (&pool->threads_dock);
  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team. The extra
     threads should be exiting. Note that we arrange for this test
     to never be true for nested teams. If AFFINITY_COUNT is non-zero,
     the barrier as well as gomp_managed_threads was temporarily
     set to NTHREADS + AFFINITY_COUNT. For NTHREADS < OLD_THREADS_COUNT,
     AFFINITY_COUNT if non-zero will be always at least
     OLD_THREADS_COUNT - NTHREADS. */
  if (__builtin_expect (nthreads < old_threads_used, 0)
      || __builtin_expect (affinity_count, 0))
    {
      long diff = (long) nthreads - (long) old_threads_used;
      if (affinity_count)
	diff = -affinity_count;
      gomp_simple_barrier_reinit (&pool->threads_dock, nthreads);
#ifdef HAVE_SYNC_BUILTINS
      __sync_fetch_and_add (&gomp_managed_threads, diff);
#else
      gomp_mutex_lock (&gomp_managed_threads_lock);
      gomp_managed_threads += diff;
      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
    }
  /* Popcorn: removed alloca, always needs to be freed */
  if (__builtin_expect (affinity_thr != NULL, 0)
      /*&& team->prev_ts.place_partition_len > 64*/)
    free (affinity_thr);
  /* Popcorn: converted alloca to malloc, needs to be freed */
  if (first_start) free (first_start);
}
#endif
/* Terminate the current team. This is only to be called by the master
   thread. We assume that we must wait for the other threads.
   Waits on the final team barrier (Popcorn: optionally the hierarchical
   hybrid barrier), tears down work shares, restores the previous team
   state, and either frees the team or caches it in the thread pool for
   reuse by gomp_new_team.  */
void
gomp_team_end (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
/* This barrier handles all pending explicit threads.
As #pragma omp cancel parallel might get awaited count in
team->barrier in an inconsistent state, we need to use a different
counter here. */
if(popcorn_global.hybrid_barrier)
hierarchy_hybrid_barrier_final (thr->popcorn_nid, "End parallel");
else
gomp_team_barrier_wait_final (&team->barrier);
if (__builtin_expect (team->team_cancelled, 0))
{
/* Cancelled team: finalize every work share on the to-free chain,
   terminating the chain where it was left open.  */
struct gomp_work_share *ws = team->work_shares_to_free;
do
{
struct gomp_work_share *next_ws = gomp_ptrlock_get (&ws->next_ws);
if (next_ws == NULL)
gomp_ptrlock_set (&ws->next_ws, ws);
gomp_fini_work_share (ws);
ws = next_ws;
}
while (ws != NULL);
}
else
gomp_fini_work_share (thr->ts.work_share);
gomp_end_task ();
/* Restore the team state saved in gomp_team_start.  */
thr->ts = team->prev_ts;
if (__builtin_expect (thr->ts.team != NULL, 0))
{
/* Nested team: account the exiting worker threads.  */
#ifdef HAVE_SYNC_BUILTINS
__sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads);
#else
gomp_mutex_lock (&gomp_managed_threads_lock);
gomp_managed_threads -= team->nthreads - 1L;
gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
/* This barrier has gomp_barrier_wait_last counterparts
and ensures the team can be safely destroyed. */
gomp_barrier_wait (&team->barrier);
}
/* Free any work shares allocated beyond the embedded chunk.  */
if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0))
{
struct gomp_work_share *ws = team->work_shares[0].next_alloc;
do
{
struct gomp_work_share *next_ws = ws->next_alloc;
free (ws);
ws = next_ws;
}
while (ws != NULL);
}
gomp_sem_destroy (&team->master_release);
if (__builtin_expect (thr->ts.team != NULL, 0)
|| __builtin_expect (team->nthreads == 1, 0))
free_team (team);
else
{
/* Top-level multi-thread team: cache it for reuse, evicting any
   previously cached team.  */
struct gomp_thread_pool *pool = thr->thread_pool;
if (pool->last_team)
free_team (pool->last_team);
pool->last_team = team;
gomp_release_thread_pool (pool);
}
}
#ifdef LIBGOMP_USE_PTHREADS
/* Constructors for this file. */
#if !defined HAVE_TLS && !defined USE_EMUTLS
/* Fallback per-thread data for the initial thread when no TLS support
   is available; worker threads allocate their own (see free path in
   the worker loop, which frees thr in that configuration).  */
static struct gomp_thread initial_thread_tls_data;
#endif
/* Library constructor: set up pthread-key based TLS fallback and
   register gomp_free_thread to run when a thread with a pool exits.  */
static void __attribute__((constructor))
initialize_team (void)
{
#if !defined HAVE_TLS && !defined USE_EMUTLS
pthread_key_create (&gomp_tls_key, NULL);
pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif
if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0)
gomp_fatal ("could not create thread pool destructor.");
}
/* Library destructor: release the pthread key created in
   initialize_team.  */
static void __attribute__((destructor))
team_destructor (void)
{
/* Without this dlclose on libgomp could lead to subsequent
crashes. */
pthread_key_delete (gomp_thread_destructor);
}
/* Library destructor: close the Popcorn profiling log if profiling was
   enabled.  Guard against a NULL stream as well -- fclose(NULL) is
   undefined behavior, and the flag could be set while the file was
   never successfully opened.  */
static void __attribute__((destructor))
popcorn_destructor (void)
{
  if (popcorn_profiling && popcorn_prof_fp != NULL)
    fclose (popcorn_prof_fp);
}
#endif
/* Allocate and install a fresh implicit task (initialized from the
   global ICVs) for the calling thread, registering the thread with the
   gomp_thread_destructor key so gomp_free_thread runs at thread exit.
   Returns a pointer to the new task's ICV block.  */
struct gomp_task_icv *
gomp_new_icv (void)
{
  struct gomp_task *root_task = gomp_malloc (sizeof (struct gomp_task));
  struct gomp_thread *self = gomp_thread ();

  gomp_init_task (root_task, NULL, &gomp_global_icv);
  self->task = root_task;
#ifdef LIBGOMP_USE_PTHREADS
  pthread_setspecific (gomp_thread_destructor, self);
#endif
  return &root_task->icv;
}
|
FDTCC.c | /*-----------------Fast double-difference cross-correlation (FDTCC)-----------
Min Liu & Miao Zhang
m.liu@dal.ca & miao.zhang@dal.ca
 Dalhousie University
Nov. 15 2019
# create the dt.cc file from raw continuous SAC file or cut SAC file
#
# -------|-----------|------------------|------
# (wb) pick (wa)
#
# For P phase CC, if your "wa" is larger than 0.9*(ts-tp), it will be replaced
# by 0.9*(ts-tp) to make sure you don't include S phase.
# For S phase CC, if your "wb" is larger than 0.5*(ts-tp), it will be replaced
# by 0.5*(ts-tp) to make sure you don't include P phase.
---------------------------------------------------------------------------*/
#include "sac.h"
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
// define names of output files
#define INPUT1 "Input.p"
#define INPUT2 "Input.s1"
#define INPUT3 "Input.s2"
#define OUTPUT "dt.cc"
#define SAC_BANDPASS "BP"
#define SAC_BUTTERWORTH "BU"
typedef struct picks {
char sta[8];
char phase[5];
float arr1;
float arr2;
double ccv;
float shift;
int quality;
float diff;
} PICK;
typedef struct event_pair {
int event1;
int event2;
PICK* pk;
} PAIR;
typedef struct stationinfo {
float stlo;
float stla;
char net[5];
char sta[8];
char comp[4];
float elev;
} STATION;
typedef struct eventinfo {
char date[10];
char time[10];
double sec;
float evlo;
float evla;
float evdp;
int event;
float* pSNR;
float* sSNR;
} EVENT;
typedef struct phases {
char sta[10];
char phase[5];
} PHASE;
typedef struct phaseinfo {
float* time;
PHASE* pa;
} PHASEINF;
typedef struct ttable {
double gdist;
double dep;
double ptime;
double stime;
double prayp;
double srayp;
double phslow;
double shslow;
char pphase[10];
char sphase[10];
} TTT;
// declarations of functions (defined later in this file)
void SubccS(PAIR*, float**, float**, int*, int, int);  // S-phase cross-correlation over two horizontal components
void SubccP(PAIR*, float**, int*, int, int);           // P-phase cross-correlation (vertical component)
void Correct_Sshift(PAIR*, float*, char**, int*);      // apply/correct S time shifts -- confirm semantics at definition
void Correct_Pshift(PAIR*, float*, char**, int*);      // apply/correct P time shifts -- confirm semantics at definition
void Transfer_sec(EVENT*, int);                        // fill EVENT.sec from date/time strings
void Cal_tt(PAIR*, PAIR*, EVENT*, STATION*);           // build theoretical-travel-time pairs (PT) from observed (PO)
void Search_event(PAIR*, EVENT*, int*, int);           // locate an event in the catalog by ID
void ddistaz(double, double, double, double, double*); // great-circle distance/azimuth helper
void Cal_sSNR(float**, float**, int*);                 // compute S-wave SNR per event/station
void Cal_pSNR(float**, int*);                          // compute P-wave SNR per event/station
void Replace(PAIR*, PHASEINF*, int, int);              // overwrite a theoretical pick with the catalog pick
void taper(float*, int, float, float);                 // cosine taper of a trace -- confirm at definition
void rtrend(float*, int);                              // remove linear trend from a trace
void bpcc(float*, SACHEAD, float, float);              // bandpass filter a trace (low/high corners)
void xapiir(float*, int, char*, double, double, int, char*, double, double, double, int); // IIR filter engine (SAC xapiir)
TTT* TB;      // travel-time table (read from tttDir)
STATION* ST;  // station list (REAL format)
PAIR* PO;     // observed event pairs (read from dt.ct)
PAIR* PT;     // event pairs with theoretical travel times (built by Cal_tt)
EVENT* EVE;   // event catalog (read from event.sel)
PHASEINF* PIN;// per-event phase picks (read from phase.dat)
// global parameters (capacities are presets; exceeding them aborts with a
// "please recompile" message)
int NP = 1700000;  // max number of event pairs
int NS = 120;      // max number of stations
int NE = 30000;    // max number of events
int np; // number of event pairs
int ns; // number of stations
int ne; // number of events
int ntb = 100000; // number of lines in ttt
float wb;   // P window length before the pick (seconds; see -W)
float wa;   // P window length after the pick
float wf;   // P maximum shift length
float wbs;  // S window length before the pick
float was;  // S window length after the pick
float wfs;  // S maximum shift length
float delta = 0.01;     // sampling interval (seconds; see -D)
float threshold = 0.5;  // CC acceptance threshold
float trx;  // travel-time table range, horizontal (see -G)
float tdx;  // travel-time table grid step, horizontal
float tdh;  // travel-time table grid step, depth
float trh;  // travel-time table range, depth
float thre_SNR = 1.0;    // SNR acceptance threshold
float thre_shift = 1.5;  // max arrival-time difference of a pick pair
// NOTE(review): 'timezone' may clash with the POSIX global of the same
// name declared in <time.h>; consider renaming.
float timezone = 0 * 3600; //not tested
int main(int argc, char** argv)
{
FILE *fp1, *fp2, *fp3;
int i, j, error = 0;
float low, high;
char lines[500], flag[10];
char staDir[100], tttDir[100], wavDir[100], eveDir[100], dctDir[100],
paDir[100];
float jk, jk1, jk2, jk3, jk4, jk5, jk6, jk7, jk8, jk9, jk10, jk11, jk12, jk13;
int ID_event;
int k = 0, kk = 0;
int f = -3;
char **staP, **staS1, **staS2;
float *ptriger, *s1triger, *s2triger;
float **waveP, **waveS1, **waveS2;
int *labelP, *labelS1, *labelS2, *markP, *markS1, *markS2;
double memory_require;
char **la_staP, **la_staS1, **la_staS2;
SACHEAD hd1, hd2, hd3;
int size, npp, nss;
int ife, ifd, ifp;
extern int np, ne, ns, ntb;
extern float threshold, delta, trx, tdx, trh, tdh, wa, wb, wf, was, wbs, wfs;
// read parameters
for (i = 1; !error && i < argc; i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'C':
sscanf(&argv[i][2], "%d/%d/%d", &ife, &ifd, &ifp);
case 'W':
sscanf(&argv[i][2], "%f/%f/%f/%f/%f/%f", &wb, &wa, &wf, &wbs, &was,
&wfs);
break;
case 'D':
sscanf(&argv[i][2], "%f/%f/%f/%f", &delta, &threshold, &thre_SNR,
&thre_shift);
break;
case 'G':
sscanf(&argv[i][2], "%f/%f/%f/%f", &trx, &trh, &tdx, &tdh);
break;
case 'B':
sscanf(&argv[i][2], "%f/%f", &low, &high);
break;
case 'F':
sscanf(&argv[i][2], "%d", &f);
break;
default:
error = 1;
break;
}
}
}
if (argc < 9 || error == 1) {
fprintf(stderr, "Usage: Fast double-difference cross-correlation (FDTCC) \n");
fprintf(stderr, " Create the dt.cc file for hypoDD, tomoDD, Growclust, etc.");
fprintf(stderr, "(Authors: Min Liu & Miao Zhang)\n");
fprintf(stderr,
" FDTCC -C(ife/ifd/ifp) -W(wb/wa/wf/wbs/was/wfs) "
"-D(delta/threshold/thre_SNR/thre_shift)\n"
" -G(trx/trh/tdx/tdh) -B(low/high) -F(f)\n");
fprintf(stderr, " "
"-------------------explanation------------------\n");
fprintf(stderr, " -C: specify the path of event.sel, dt.ct and "
"phase.dat (1: yes, 0: default names)\n");
fprintf(stderr, " -W: waveform window length before and after "
"picks and their maximum shift length\n");
fprintf(stderr, " -D: sampling interval, CC threshold, SNR threshold, max arrival time diff of the pick pair\n");
fprintf(stderr, " -G: ranges and grids in horizontal direction and depth (in traveltime table)\n");
fprintf(stderr, " -F: input data format (0: continuous data; 1: event segments)\n");
fprintf(stderr, " -B: waveform bandpass filtering (e.g., 2/8; "
"-1/-1: no filter applied).\n");
fprintf(stderr, " SAC name format: date/net.sta.comp, e.g., 20210101/AA.BBBB.HHZ\n"
" or eventID/net.sta.comop, e.g., 8/AA.BBBB.HHZ).\n");
exit(-1);
}
strcpy(staDir, argv[7]);
strcpy(tttDir, argv[8]);
strcpy(wavDir, argv[9]);
if (ife == 1)
strcpy(eveDir, argv[10]);
else
strcpy(eveDir, "./event.sel");
if (ifd == 1)
strcpy(dctDir, argv[11]);
else
strcpy(dctDir, "./dt.ct");
if (ifp == 1)
strcpy(paDir, argv[12]);
else
strcpy(paDir, "./phase.dat");
// read phase information from phase.dat
PIN = (PHASEINF*)malloc(sizeof(PHASEINF) * (NE));
for (i = 0; i < NE; i++) {
PIN[i].pa = (PHASE*)malloc(sizeof(PHASE) * (NS * 2));
PIN[i].time = (float*)malloc(sizeof(float) * (NS * 2));
}
if ((fp1 = fopen(paDir, "r")) == NULL) {
fprintf(stderr, "Unable to open phasefile\n");
exit(-1);
}
while (fgets(lines, 150, fp1) != NULL) {
sscanf(lines, "%s", flag);
if (strcmp(flag, "#") == 0) {
sscanf(lines, "%s %f %f %f %f %f %f %f %f %f %f %f %f %f %d", flag, &jk2,
&jk3, &jk4, &jk5, &jk6, &jk7, &jk8, &jk9, &jk10, &jk11, &jk12,
&jk13, &jk13, &ID_event);
j = 0;
} else {
sscanf(lines, "%s %f %f %s", PIN[ID_event].pa[j].sta,
&PIN[ID_event].time[j], &jk, PIN[ID_event].pa[j].phase);
j++;
}
}
fclose(fp1);
// read observed event pairs (dt.ct)
PO = (PAIR*)malloc(sizeof(PAIR) * (NP));
for (i = 0; i < NP; i++) {
PO[i].pk = (PICK*)malloc(sizeof(PICK) * (NS * 2));
}
if ((fp1 = fopen(dctDir, "r")) == NULL) {
fprintf(stderr, "Unable to open dt.ct\n");
exit(-1);
}
j = 0;
k = 0;
while (fgets(lines, 100, fp1) != NULL) {
sscanf(lines, "%s", flag);
if (strcmp(flag, "#") == 0) {
sscanf(lines, "%s %d %d", flag, &PO[k].event1, &PO[k].event2);
k++;
if (k > NP) {
fprintf(stderr, "Number of event-pairs exceeds the preset NP, please "
"recompile with a larger NP!\n");
exit(-1);
}
kk = 0;
} else {
sscanf(lines, "%s %.2f %.2f %f %s", PO[k - 1].pk[kk].sta,
&PO[k - 1].pk[kk].arr1, &PO[k - 1].pk[kk].arr2, &jk,
PO[k - 1].pk[kk].phase);
kk++;
j++;
if (kk > NS) {
fprintf(stderr, "Number of stations exceeds the preset NS, please "
"recompile with a larger NS!\n");
exit(-1);
}
}
}
fclose(fp1);
np = k;
printf(" FDTCC reads %d event-pairs\n", np);
// read stations (REAL format)
if ((fp1 = fopen(staDir, "r")) == NULL) {
fprintf(stderr, "Unable to open stations\n");
exit(-1);
}
ST = (STATION*)malloc(sizeof(STATION) * NS);
k = 0;
while (fgets(lines, 100, fp1) != NULL) {
sscanf(lines, "%f %f %s %s %s %f", &ST[k].stlo, &ST[k].stla, ST[k].net,
ST[k].sta, ST[k].comp, &ST[k].elev);
k++;
if (k > NS) {
fprintf(stderr, "Number of stations exceeds the preset NS, please "
"recompile with a larger NS!\n");
exit(-1);
}
}
ns = k;
fclose(fp1);
printf(" FDTCC reads %d stations\n", k);
// read event information (event.sel)
if ((fp1 = fopen(eveDir, "r")) == NULL) {
fprintf(stderr, "Unable to open event.sel\n");
exit(-1);
}
EVE = (EVENT*)malloc(sizeof(EVENT) * NE);
for (i = 0; i < NE; i++) {
EVE[i].pSNR = (float*)malloc(sizeof(float) * ns);
EVE[i].sSNR = (float*)malloc(sizeof(float) * ns);
}
k = 0;
while (fgets(lines, 100, fp1) != NULL) {
sscanf(lines, "%s %s %f %f %f %f %f %f %f %d", EVE[k].date, EVE[k].time,
&EVE[k].evla, &EVE[k].evlo, &EVE[k].evdp, &jk, &jk1, &jk2, &jk3,
&EVE[k].event);
if (EVE[k].evdp > trh) {
fprintf(stderr, "event out of the travel-time table, please update it\n");
fprintf(stderr,
" maximum distance and depth are %f and %f, respectively\n", trx,
trh);
exit(-1);
}
k++;
if (k > NE) {
fprintf(stderr, "Number of events exceeds the preset NE, please "
"recompile with a larger NE!\n");
exit(-1);
}
}
ne = k;
printf(" FDTCC reads %d events\n", k);
Transfer_sec(EVE, ne);
fclose(fp1);
// read tt table (REAL format)
if ((fp1 = fopen(tttDir, "r")) == NULL) {
fprintf(stderr, "Unable to open tttDir\n");
exit(-1);
}
TB = (TTT*)malloc(sizeof(TTT) * ntb);
k = 0;
while (fgets(lines, 300, fp1) != NULL) {
sscanf(lines, "%lf %lf %lf %lf %lf %lf %lf %lf %s %s", &TB[k].gdist,
&TB[k].dep, &TB[k].ptime, &TB[k].stime, &TB[k].prayp, &TB[k].srayp,
&TB[k].phslow, &TB[k].shslow, TB[k].pphase, TB[k].sphase);
k++;
if (k > ntb) {
fprintf(stderr, "Line of travel-time exceeds the preset ntb, please "
"recompile with a larger ntb!\n");
exit(-1);
}
}
printf(" FDTCC reads %d travel-times\n", k);
// memory check
npp = (int)(((wa + wb + 2)) / delta + 1);
nss = (int)(((was + wbs + 2)) / delta + 1);
size = ne * ns * 3;
memory_require = ((size / (1024.0 * 1024.0 * 1024.0)) * npp) * sizeof(float);
printf(" Memory require > %.2lf GB.\n", memory_require);
// creat event pairs with theoretical travel times
printf(" Creating database... \n");
PT = (PAIR*)malloc(sizeof(PAIR) * np);
for (i = 0; i < np; i++) {
PT[i].pk = (PICK*)malloc(sizeof(PICK) * (ns * 2));
}
Cal_tt(PO, PT, EVE, ST);
// update PT based on PO
#pragma omp parallel for shared(PT, PIN, np, ns) private(i, j)
for (i = 0; i < np; i++) {
for (j = 0; j < 2 * ns; j++) {
Replace(PT, PIN, i, j);
}
}
#pragma omp barrier
// read wavefrom
fp1 = fopen(INPUT1, "w");
fp2 = fopen(INPUT2, "w");
fp3 = fopen(INPUT3, "w");
if (fp1 == NULL || fp2 == NULL || fp3 == NULL) {
fprintf(stderr, "Can't open INPUT files\n");
exit(-1);
}
for (i = 0; i < ne; i++) {
for (j = 0; j < ns; j++) {
if (f == 0) {
fprintf(fp1, "%s/%s/%s.%s.%c%cZ %s %.2lf %d\n", wavDir,
EVE[i].date, ST[j].net, ST[j].sta, ST[j].comp[0], ST[j].comp[1],
ST[j].sta, EVE[i].sec, EVE[i].event);
fprintf(fp2, "%s/%s/%s.%s.%c%cE %s %.2lf %d\n", wavDir,
EVE[i].date, ST[j].net, ST[j].sta, ST[j].comp[0], ST[j].comp[1],
ST[j].sta, EVE[i].sec, EVE[i].event);
fprintf(fp3, "%s/%s/%s.%s.%c%cN %s %.2lf %d\n", wavDir,
EVE[i].date, ST[j].net, ST[j].sta, ST[j].comp[0], ST[j].comp[1],
ST[j].sta, EVE[i].sec, EVE[i].event);
} else if (f == 1) {
fprintf(fp1, "%s/%d/%s.%s.%c%cZ %s 0.0 %d\n", wavDir,
EVE[i].event, ST[j].net, ST[j].sta, ST[j].comp[0], ST[j].comp[1],
ST[j].sta, EVE[i].event);
fprintf(fp2, "%s/%d/%s.%s.%c%cE %s 0.0 %d\n", wavDir,
EVE[i].event, ST[j].net, ST[j].sta, ST[j].comp[0], ST[j].comp[1],
ST[j].sta, EVE[i].event);
fprintf(fp3, "%s/%d/%s.%s.%c%cN %s 0.0 %d\n", wavDir,
EVE[i].event, ST[j].net, ST[j].sta, ST[j].comp[0], ST[j].comp[1],
ST[j].sta, EVE[i].event);
}
}
}
fclose(fp1);
fclose(fp2);
fclose(fp3);
ptriger = (float*)malloc(ne * ns * sizeof(float));
s1triger = (float*)malloc(ne * ns * sizeof(float));
s2triger = (float*)malloc(ne * ns * sizeof(float));
staP = (char**)malloc(sizeof(char*) * ne * ns);
staS1 = (char**)malloc(sizeof(char*) * ne * ns);
staS2 = (char**)malloc(sizeof(char*) * ne * ns);
la_staP = (char**)malloc(sizeof(char*) * ne * ns);
la_staS1 = (char**)malloc(sizeof(char*) * ne * ns);
la_staS2 = (char**)malloc(sizeof(char*) * ne * ns);
for (i = 0; i < ne * ns; i++) {
staP[i] = (char*)malloc(sizeof(char) * 256);
staS1[i] = (char*)malloc(sizeof(char) * 256);
staS2[i] = (char*)malloc(sizeof(char) * 256);
la_staP[i] = (char*)malloc(sizeof(char) * 10);
la_staS1[i] = (char*)malloc(sizeof(char) * 10);
la_staS2[i] = (char*)malloc(sizeof(char) * 10);
}
waveP = (float**)calloc(ne * ns, sizeof(float*));
for (i = 0; i < ne * ns; i++)
waveP[i] = (float*)calloc(npp, sizeof(float));
waveS1 = (float**)calloc(ne * ns, sizeof(float*));
for (i = 0; i < ne * ns; i++)
waveS1[i] = (float*)calloc(nss, sizeof(float));
waveS2 = (float**)calloc(ne * ns, sizeof(float*));
for (i = 0; i < ne * ns; i++)
waveS2[i] = (float*)calloc(nss, sizeof(float));
labelP = (int*)malloc(ne * ns * sizeof(int));
labelS1 = (int*)malloc(ne * ns * sizeof(int));
labelS2 = (int*)malloc(ne * ns * sizeof(int));
markP = (int*)malloc(ne * ns * sizeof(int));
markS1 = (int*)malloc(ne * ns * sizeof(int));
markS2 = (int*)malloc(ne * ns * sizeof(int));
fp1 = fopen(INPUT1, "r");
fp2 = fopen(INPUT2, "r");
fp3 = fopen(INPUT3, "r");
if (fp1 == NULL || fp2 == NULL || fp3 == NULL) {
fprintf(stderr, "Can't open INPUT files\n");
exit(-1);
}
for (i = 0; i < ne * ns; i++) {
fscanf(fp1, "%s %s %f %d", staP[i], la_staP[i], &ptriger[i], &labelP[i]);
fscanf(fp2, "%s %s %f %d", staS1[i], la_staS1[i], &s1triger[i],
&labelS1[i]);
fscanf(fp3, "%s %s %f %d", staS2[i], la_staS2[i], &s2triger[i],
&labelS2[i]);
}
Correct_Pshift(PT, ptriger, la_staP, labelP);
Correct_Sshift(PT, s1triger, la_staS1, labelS1);
Correct_Sshift(PT, s2triger, la_staS2, labelS2);
#pragma omp parallel for shared(waveP, waveS1, waveS2, was, wbs, staP, ptriger, s1triger, s2triger, low, high, timezone, wb, wa, markP, markS1, markS2, np, ns) private(hd1, hd2, hd3, i, j)
for (i = 0; i < ne * ns; i++) {
markP[i] = 1;
markS1[i] = 1;
markS2[i] = 1;
if ((waveP[i] = read_sac2(staP[i], &hd1, -3, ptriger[i] - timezone - wb - 1,
ptriger[i] - timezone + wa + 1))
== NULL) {
markP[i] = 0;
//fprintf(stderr,"no station %s\n",staP[i]);
} else if (low > 0 && high > 0) {
bpcc(waveP[i], hd1, low, high);
}
//in case user want to check waveform
//char tmp[100];
//sprintf(tmp,"%d/%s.%s.%c%cZ",EVE[i/ns].event,ST[i%ns].net, ST[i%ns].sta, ST[i%ns].comp[0],ST[i%ns].comp[1]);
//printf("%d/%s.%s.%c%cZ\n",EVE[i/ns].event,ST[i%ns].net, ST[i%ns].sta, ST[i%ns].comp[0],ST[i%ns].comp[1]);
//if(markP[i]==1)write_sac(tmp,hd1,waveP[i]);
if ((waveS1[i] = read_sac2(staS1[i], &hd2, -3, s1triger[i] - timezone - wbs - 1,
s1triger[i] - timezone + was + 1))
== NULL) {
markS1[i] = 0;
//fprintf(stderr,"no station %s\n",staS1[i]);
} else if (low > 0 && high > 0) {
bpcc(waveS1[i], hd2, low, high);
}
if ((waveS2[i] = read_sac2(staS2[i], &hd3, -3, s2triger[i] - wbs - timezone - 1,
s2triger[i] + was + timezone + 1))
== NULL) {
markS2[i] = 0;
//fprintf(stderr,"no station %s\n",staS2[i]);
} else if (low > 0 && high > 0) {
bpcc(waveS2[i], hd3, low, high);
}
}
#pragma omp barrier
Cal_sSNR(waveS1, waveS2, markS1);
Cal_pSNR(waveP, markP);
fclose(fp1);
fclose(fp2);
fclose(fp3);
// calculate cc
printf(" FDTCC starts to calculate ccv\n");
int pair_point[2];
#pragma omp parallel for shared(PT, EVE, waveP) private(i, j, pair_point)
for (i = 0; i < np; i++) {
Search_event(PT, EVE, pair_point, i);
for (j = 0; j < ns; j++) {
if (markP[pair_point[0] * ns + j] == 0 || markP[pair_point[1] * ns + j] == 0 || EVE[pair_point[0]].pSNR[j] <= thre_SNR || EVE[pair_point[1]].pSNR[j] <= thre_SNR) {
PT[i].pk[2 * j].quality = 0;
continue;
}
PT[i].pk[2 * j].quality = 1;
SubccP(PT, waveP, pair_point, i, j);
}
}
#pragma omp barrier
#pragma omp parallel for shared(PT, EVE, waveS1, waveS2) private(i, j, \
pair_point)
for (i = 0; i < np; i++) {
Search_event(PT, EVE, pair_point, i);
for (j = 0; j < ns; j++) {
if (markS1[pair_point[0] * ns + j] == 0 || markS1[pair_point[1] * ns + j] == 0 || EVE[pair_point[0]].sSNR[j] <= thre_SNR || EVE[pair_point[1]].sSNR[j] <= thre_SNR) {
PT[i].pk[2 * j + 1].quality = 0;
continue;
}
PT[i].pk[2 * j + 1].quality = 1;
SubccS(PT, waveS1, waveS2, pair_point, i, j);
}
}
#pragma omp barrier
// output dt.cc
fp1 = fopen(OUTPUT, "w");
for (i = 0; i < np; i++) {
fprintf(fp1, "# %d %d 0\n", PT[i].event1, PT[i].event2);
for (j = 0; j < 2 * ns; j++) {
if (PT[i].pk[j].quality == 1 && PT[i].pk[j].ccv >= threshold && PT[i].pk[j].ccv > 0) {
fprintf(fp1, "%5s %10.4f %10.2lf %3s\n", PT[i].pk[j].sta,
PT[i].pk[j].arr1 - PT[i].pk[j].arr2 + PT[i].pk[j].shift,
PT[i].pk[j].ccv, PT[i].pk[j].phase);
}
}
}
fclose(fp1);
printf(" Results were written in dt.cc\n");
// free memory
for (i = 0; i < NP; i++) {
free(PO[i].pk);
}
for (i = 0; i < np; i++) {
free(PT[i].pk);
}
free(PT);
free(PO);
free(ST);
free(EVE);
free(TB);
for (i = 0; i < ne * ns; i++) {
free(staP[i]);
free(staS1[i]);
free(staS2[i]);
free(waveP[i]);
free(waveS1[i]);
free(waveS2[i]);
free(la_staP[i]);
free(la_staS1[i]);
free(la_staS2[i]);
}
free(la_staP);
free(la_staS1);
free(la_staS2);
free(staP);
free(staS1);
free(staS2);
free(waveP);
free(waveS1);
free(waveS2);
free(ptriger);
free(s1triger);
free(s2triger);
free(labelP);
free(labelS1);
free(labelS2);
for (i = 0; i < NE; i++) {
free(PIN[i].pa);
free(PIN[i].time);
}
free(PIN);
}
// transfer hms to sec
// Convert each event's packed time-of-day string ([[hh]mm]ss]cc with no
// separators; the trailing two digits are presumably centiseconds — the
// value is divided by 100) into seconds, stored in EVE[i].sec.
// The string length selects which fields are present.
void Transfer_sec(EVENT* EVE, int ne)
{
    int idx;
    int hh, mm, ss, cs; /* hours, minutes, seconds, fractional part */
    for (idx = 0; idx < ne; idx++) {
        switch (strlen(EVE[idx].time)) {
        case 1: /* "c" */
            sscanf(EVE[idx].time, "%1d", &cs);
            EVE[idx].sec = (double)cs / 100.0;
            break;
        case 2: /* "cc" */
            sscanf(EVE[idx].time, "%2d", &cs);
            EVE[idx].sec = (double)cs / 100.0;
            break;
        case 3: /* "scc" */
            sscanf(EVE[idx].time, "%1d%2d", &ss, &cs);
            EVE[idx].sec = (double)(ss * 100 + cs) / 100.0;
            break;
        case 4: /* "sscc" */
            sscanf(EVE[idx].time, "%2d%2d", &ss, &cs);
            EVE[idx].sec = (double)(ss * 100 + cs) / 100.0;
            break;
        case 5: /* "msscc" */
            sscanf(EVE[idx].time, "%1d%2d%2d", &mm, &ss, &cs);
            EVE[idx].sec = (double)((mm * 60 + ss) * 100 + cs) / 100.0;
            break;
        case 6: /* "mmsscc" */
            sscanf(EVE[idx].time, "%2d%2d%2d", &mm, &ss, &cs);
            EVE[idx].sec = (double)((mm * 60 + ss) * 100 + cs) / 100.0;
            break;
        case 7: /* "hmmsscc" */
            sscanf(EVE[idx].time, "%1d%2d%2d%2d", &hh, &mm, &ss, &cs);
            EVE[idx].sec = (double)((hh * 3600 + mm * 60 + ss) * 100 + cs) / 100.0;
            break;
        case 8: /* "hhmmsscc" */
            sscanf(EVE[idx].time, "%2d%2d%2d%2d", &hh, &mm, &ss, &cs);
            EVE[idx].sec = (double)((hh * 3600 + mm * 60 + ss) * 100 + cs) / 100.0;
            break;
        default:
            printf("Wrong time point\n");
            break;
        }
    }
}
// Fill PT with predicted P and S arrival times for every station and
// both events of each pair in PO. Times come from the precomputed
// travel-time table TB (rows indexed by source depth, columns by
// epicentral distance), refined with a first-order correction using the
// tabulated ray parameter (horizontal slowness) and vertical slowness.
void Cal_tt(PAIR* PO, PAIR* PT, EVENT* EVE, STATION* ST)
{
int i, j, ih, ig, k;
extern float trx, tdx, tdh; // table extents/steps: max distance, distance step, depth step
extern int np, ns;
int event[2];
extern TTT* TB;
double GCarc1, GCarc2;
#pragma omp parallel for shared(PO, PT, EVE, ST, TB, np, ns, trx, tdx, tdh) private(i, j, ih, ig, k, event, GCarc1, GCarc2)
for (i = 0; i < np; i++) {
// locate the pair's two events in the EVE array
Search_event(PO, EVE, event, i);
PT[i].event1 = PO[i].event1;
PT[i].event2 = PO[i].event2;
k = 0;
// two picks per station: pk[k] holds the P phase, pk[k+1] the S phase
for (j = 0; j < ns; j++) {
strcpy(PT[i].pk[k].sta, ST[j].sta);
strcpy(PT[i].pk[k + 1].sta, ST[j].sta);
strcpy(PT[i].pk[k].phase, "P");
strcpy(PT[i].pk[k + 1].phase, "S");
// event 1: great-circle distance -> table cell -> linearized P/S times
ddistaz(ST[j].stla, ST[j].stlo, EVE[event[0]].evla, EVE[event[0]].evlo, &GCarc1);
ih = rint(EVE[event[0]].evdp / tdh);
ig = ih * rint(trx / tdx) + rint(GCarc1 / tdx);
PT[i].pk[k].arr1 = TB[ig].ptime + (GCarc1 - TB[ig].gdist) * TB[ig].prayp + (EVE[event[0]].evdp - TB[ig].dep) * TB[ig].phslow;
PT[i].pk[k + 1].arr1 = TB[ig].stime + (GCarc1 - TB[ig].gdist) * TB[ig].srayp + (EVE[event[0]].evdp - TB[ig].dep) * TB[ig].shslow;
// event 2: same lookup for the second event of the pair
ddistaz(ST[j].stla, ST[j].stlo, EVE[event[1]].evla, EVE[event[1]].evlo, &GCarc2);
ih = rint(EVE[event[1]].evdp / tdh);
ig = ih * rint(trx / tdx) + rint(GCarc2 / tdx);
PT[i].pk[k].arr2 = TB[ig].ptime + (GCarc2 - TB[ig].gdist) * TB[ig].prayp + (EVE[event[1]].evdp - TB[ig].dep) * TB[ig].phslow;
PT[i].pk[k + 1].arr2 = TB[ig].stime + (GCarc2 - TB[ig].gdist) * TB[ig].srayp + (EVE[event[1]].evdp - TB[ig].dep) * TB[ig].shslow;
// differential predicted times (event2 - event1)
PT[i].pk[k].diff = PT[i].pk[k].arr2 - PT[i].pk[k].arr1;
PT[i].pk[k + 1].diff = PT[i].pk[k + 1].arr2 - PT[i].pk[k + 1].arr1;
k = k + 2;
}
}
#pragma omp barrier
}
// Map the two event IDs of pair n onto their indices in the EVE array:
// serial[0] gets the index of event1, serial[1] the index of event2.
// If an ID is not found, the corresponding slot is left unmodified.
void Search_event(PAIR* PO, EVENT* EVE, int* serial, int n)
{
    extern int ne;
    int idx;
    for (idx = 0; idx < ne; idx++) {
        if (EVE[idx].event == PO[n].event1) {
            serial[0] = idx;
            break;
        }
    }
    for (idx = 0; idx < ne; idx++) {
        if (EVE[idx].event == PO[n].event2) {
            serial[1] = idx;
            break;
        }
    }
}
/* * Modified by M. Zhang
c Subroutine to calculate the Great Circle Arc distance
c between two sets of geographic coordinates
c
c Given: stalat => Latitude of first point (+N, -S) in degrees
c stalon => Longitude of first point (+E, -W) in degrees
c evtlat => Latitude of second point
c evtlon => Longitude of second point
c
c Returns: delta => Great Circle Arc distance in degrees
c az => Azimuth from pt. 1 to pt. 2 in degrees
c baz => Back Azimuth from pt. 2 to pt. 1 in degrees
c
c If you are calculating station-epicenter pairs, pt. 1 is the station
c
c Equations taken from Bullen, pages 154, 155
c
c T. Owens, September 19, 1991
c Sept. 25 -- fixed az and baz calculations
c
P. Crotwell, September 27, 1994
Converted to c to fix annoying problem of fortran giving wrong
answers if the input doesn't contain a decimal point.
*/
void ddistaz(double stalat, double stalon, double evtlat, double evtlon,
double* delta)
{
// double stalat, stalon, evtlat, evtlon;
// double delta, az, baz;
double scolat, slon, ecolat, elon;
double a, b, c, d, e, aa, bb, cc, dd, ee, g, gg, h, hh, k, kk;
double rhs1, rhs2, sph, rad, del, daz, az, dbaz, pi, piby2;
/*
stalat = atof(argv[1]);
stalon = atof(argv[2]);
evtlat = atof(argv[3]);
evtlon = atof(argv[4]);
*/
pi = 3.141592654;
piby2 = pi / 2.0;
rad = 2. * pi / 360.0;
sph = 1.0 / 298.257;
scolat = piby2 - atan((1. - sph) * (1. - sph) * tan(stalat * rad));
ecolat = piby2 - atan((1. - sph) * (1. - sph) * tan(evtlat * rad));
slon = stalon * rad;
elon = evtlon * rad;
a = sin(scolat) * cos(slon);
b = sin(scolat) * sin(slon);
c = cos(scolat);
d = sin(slon);
e = -cos(slon);
g = -c * e;
h = c * d;
k = -sin(scolat);
aa = sin(ecolat) * cos(elon);
bb = sin(ecolat) * sin(elon);
cc = cos(ecolat);
dd = sin(elon);
ee = -cos(elon);
gg = -cc * ee;
hh = cc * dd;
kk = -sin(ecolat);
del = acos(a * aa + b * bb + c * cc);
*delta = del / rad; // delta
}
// Add the P arrival time from PT to each per-trace shift a[i], turning
// the trigger-relative value into one aligned with the predicted/picked
// P arrival. For trace i (event ID c[i], station name b[i]) the first
// pair whose event1 or event2 matches c[i] supplies arr1 or arr2 of the
// P pick at station b[i]; if no pair matches, a[i] is left unchanged.
void Correct_Pshift(PAIR* PT, float* a, char** b, int* c)
{
int i, j, k;
#pragma omp parallel for shared(PT, a, b, c, ne, ns, np) private(i, j, k)
for (i = 0; i < ne * ns; i++) {
for (j = 0; j < np; j++) {
// match as event1 of this pair: use arr1 of the station's P pick
if (PT[j].event1 == c[i]) {
for (k = 0; k < 2 * ns; k++) {
if (strcmp(PT[j].pk[k].sta, b[i]) == 0 && strcmp(PT[j].pk[k].phase, "P") == 0) {
a[i] = a[i] + PT[j].pk[k].arr1;
break;
}
}
break;
}
// match as event2: use arr2 instead
if (PT[j].event2 == c[i]) {
for (k = 0; k < 2 * ns; k++) {
if (strcmp(PT[j].pk[k].sta, b[i]) == 0 && strcmp(PT[j].pk[k].phase, "P") == 0) {
a[i] = a[i] + PT[j].pk[k].arr2;
break;
}
}
break;
}
}
}
#pragma omp barrier
}
// S-phase counterpart of Correct_Pshift: adds the S arrival time from
// PT to each per-trace shift a[i]. For trace i (event ID c[i], station
// b[i]) the first pair whose event1/event2 matches supplies arr1/arr2
// of the S pick at that station; unmatched traces are left unchanged.
void Correct_Sshift(PAIR* PT, float* a, char** b, int* c)
{
int i, j, k;
#pragma omp parallel for shared(PT, a, b, c, ne, ns, np) private(i, j, k)
for (i = 0; i < ne * ns; i++) {
for (j = 0; j < np; j++) {
// match as event1 of this pair: use arr1 of the station's S pick
if (PT[j].event1 == c[i]) {
for (k = 0; k < 2 * ns; k++) {
if (strcmp(PT[j].pk[k].sta, b[i]) == 0 && strcmp(PT[j].pk[k].phase, "S") == 0) {
a[i] = a[i] + PT[j].pk[k].arr1;
break;
}
}
break;
}
// match as event2: use arr2 instead
if (PT[j].event2 == c[i]) {
for (k = 0; k < 2 * ns; k++) {
if (strcmp(PT[j].pk[k].sta, b[i]) == 0 && strcmp(PT[j].pk[k].phase, "S") == 0) {
a[i] = a[i] + PT[j].pk[k].arr2;
break;
}
}
break;
}
}
}
#pragma omp barrier
}
// Sliding-window cross-correlation of the P waveforms of the two events
// of pair i at station j. Scans lags up to +/-w seconds, stores the
// best |normalized cc| and its lag into PT[i].pk[2*j].ccv/.shift, and
// zeroes the pick's quality when the S-P time is non-positive or the
// resulting differential time exceeds thre_shift.
// a[0], a[1]: indices of the pair's events into the waveform array.
void SubccP(PAIR* PT, float** waveP, int* a, int i, int j)
{
int k, kk, Npoint, Wpoint, ref_shift;
float s_p;
double cc, norm, normMaster, tmp;
extern int ns;
extern float delta, wa, wb, wf, thre_shift;
float w, wa1;
// samples in one second: each extracted trace carries a 1-s pad at the
// front (see the read_sac2 calls), which this offset skips over
int t_shift = (int)(1/delta);
s_p = PT[i].pk[2 * j + 1].arr1 - PT[i].pk[2 * j].arr1; // S-P time at this station
if (s_p <= 0)
PT[i].pk[2 * j].quality = 0;
else {
wa1 = wa;
w = wf;
// shrink the post-pick window so it cannot run into the S wave
if (wa > 0.9 * s_p)
wa1 = 0.9 * s_p;
// cap the maximum lag at half the correlation window
if (w > 0.5 * (wa1 + wb))
w = 0.5 * (wa1 + wb);
ref_shift = (int)(w / delta); // max lag in samples
w = ref_shift * delta; // snap the lag range onto the sample grid
Npoint = (int)(2 * w / delta - 0.5); // number of lags scanned
Wpoint = (int)((wa1 + wb) / delta - 0.5); // window length in samples
PT[i].pk[2 * j].ccv = 0;
PT[i].pk[2 * j].shift = 0;
normMaster = 0.0;
norm = 0.0;
// energy of each trace over the window, for normalization
for (k = 0; k <= Wpoint; k++) {
norm += waveP[a[1] * ns + j][k + t_shift] * waveP[a[1] * ns + j][k + t_shift];
normMaster += waveP[a[0] * ns + j][k + t_shift] * waveP[a[0] * ns + j][k + t_shift];
}
// scan all lags; k = ref_shift corresponds to zero shift
for (k = 0; k <= Npoint; k++) {
cc = 0.0;
if (k <= ref_shift) {
// negative (or zero) lag: master trace starts earlier
for (kk = ref_shift - k; kk <= Wpoint; kk++) {
cc += waveP[a[0] * ns + j][kk - ref_shift + k + t_shift] * waveP[a[1] * ns + j][kk + t_shift];
}
} else {
// positive lag: master trace starts later
for (kk = 0; kk <= Wpoint - (k - ref_shift); kk++) {
cc += waveP[a[0] * ns + j][kk + k - ref_shift + t_shift] * waveP[a[1] * ns + j][kk + t_shift];
}
}
tmp = cc / (sqrt(norm) * sqrt(normMaster));
// keep the lag with the largest absolute correlation
if (fabs(tmp) > PT[i].pk[2 * j].ccv) {
PT[i].pk[2 * j].ccv = fabs(tmp);
PT[i].pk[2 * j].shift = k * delta - w;
}
}
// discard picks whose corrected differential time is implausibly large
if (fabs(PT[i].pk[2 * j].arr1 - PT[i].pk[2 * j].arr2 + PT[i].pk[2 * j].shift) > thre_shift) {
PT[i].pk[2 * j].quality = 0;
}
}
}
// Sliding-window cross-correlation of the S waveforms of the two events
// of pair i at station j, using both horizontal components (waveS1 and
// waveS2); the two normalized correlations are averaged per lag. The
// best |cc| and its lag go to PT[i].pk[2*j+1].ccv/.shift; quality is
// zeroed when the S-P time is non-positive or the resulting
// differential time exceeds thre_shift.
// a[0], a[1]: indices of the pair's events into the waveform arrays.
void SubccS(PAIR* PT, float** waveS1, float** waveS2, int* a, int i, int j)
{
int Npoint, Wpoint, k, kk, ref_shift;
float s_p;
double cc1, norm1, normMaster1, cc2, norm2, normMaster2, tmp;
extern int ns;
extern float wbs, was, delta, wfs, thre_shift;
int tt;
// samples in one second: each extracted trace carries a 1-s pad at the
// front (see the read_sac2 calls), which this offset skips over
int t_shift = (int)(1/delta);
float w, wbs1;
s_p = PT[i].pk[2 * j + 1].arr1 - PT[i].pk[2 * j].arr1; // S-P time at this station
if (s_p <= 0)
PT[i].pk[2 * j + 1].quality = 0;
else {
wbs1 = wbs;
w = wfs;
tt = 0;
// shrink the pre-pick window so it cannot reach back into the P wave;
// tt counts the samples trimmed from the front of the stored window
if (wbs1 > 0.5 * s_p) {
wbs1 = 0.5 * s_p;
tt = (int)((wbs - 0.5 * s_p) / delta - 0.5);
}
// cap the maximum lag at half the correlation window
if (w > 0.5 * (was + wbs1)) {
w = 0.5 * (was + wbs1);
}
ref_shift = (int)(w / delta); // max lag in samples
w = ref_shift * delta; // snap the lag range onto the sample grid
Npoint = (int)(2 * w / delta - 0.5); // number of lags scanned
Wpoint = (int)((was + wbs1) / delta - 0.5); // window length in samples
PT[i].pk[2 * j + 1].ccv = 0;
PT[i].pk[2 * j + 1].shift = 0;
normMaster1 = 0.0;
norm1 = 0.0;
normMaster2 = 0.0;
norm2 = 0.0;
// energy of each component/trace over the window, for normalization
for (k = 0; k <= Wpoint; k++) {
norm1 += waveS1[a[1] * ns + j][tt + k +t_shift] * waveS1[a[1] * ns + j][tt + k +t_shift];
normMaster1 += waveS1[a[0] * ns + j][tt + k + t_shift] * waveS1[a[0] * ns + j][tt + k + t_shift];
norm2 += waveS2[a[1] * ns + j][tt + k + t_shift] * waveS2[a[1] * ns + j][k + tt + t_shift];
normMaster2 += waveS2[a[0] * ns + j][tt + k + t_shift] * waveS2[a[0] * ns + j][tt + k + t_shift];
}
// scan all lags; k = ref_shift corresponds to zero shift
for (k = 0; k <= Npoint; k++) {
cc1 = 0.0;
cc2 = 0.0;
if (k <= ref_shift) {
// negative (or zero) lag: master trace starts earlier
for (kk = ref_shift - k; kk <= Wpoint; kk++) {
cc1 += waveS1[a[0] * ns + j][tt + kk - ref_shift + k + t_shift] * waveS1[a[1] * ns + j][tt + kk + t_shift];
cc2 += waveS2[a[0] * ns + j][tt + kk - ref_shift + k + t_shift] * waveS2[a[1] * ns + j][tt + kk + t_shift];
}
} else {
// positive lag: master trace starts later
for (kk = 0; kk <= Wpoint - (k - ref_shift); kk++) {
cc1 += waveS1[a[0] * ns + j][tt + kk - ref_shift + k + t_shift] * waveS1[a[1] * ns + j][tt + kk + t_shift];
cc2 += waveS2[a[0] * ns + j][tt + kk - ref_shift + k + t_shift] * waveS2[a[1] * ns + j][tt + kk + t_shift];
}
}
// average the normalized correlations of the two components
tmp = ((cc1 / (sqrt(norm1) * sqrt(normMaster1))) + (cc2 / (sqrt(norm2) * sqrt(normMaster2)))) / 2;
// keep the lag with the largest absolute correlation
if (fabs(tmp) > PT[i].pk[2 * j + 1].ccv) {
PT[i].pk[2 * j + 1].ccv = fabs(tmp);
PT[i].pk[2 * j + 1].shift = k * delta - w;
}
}
// discard picks whose corrected differential time is implausibly large
if (fabs(PT[i].pk[2 * j + 1].arr1 - PT[i].pk[2 * j + 1].arr2 + PT[i].pk[2 * j + 1].shift) > thre_shift)
PT[i].pk[2 * j + 1].quality = 0;
}
}
// Compute the P-wave signal-to-noise ratio for every event/station
// trace: mean power in the wa-second window after the pick divided by
// mean power in the wb-second window before it. Results are stored in
// EVE[i].pSNR[k]; traces flagged unusable in mark[] get SNR 0.
void Cal_pSNR(float** wave, int* mark)
{
extern float delta, wa, wb;
extern int ns, ne;
int i, j, k;
double s, n; // signal and noise power accumulators
// samples in one second: skips the 1-s pad at the front of each trace
int t_shift = (int)(1/delta);
int spoint, npoint;
extern EVENT* EVE;
spoint = (int)(wa / delta - 0.5); // signal-window samples (after pick)
npoint = (int)(wb / delta - 0.5); // noise-window samples (before pick)
#pragma omp parallel for shared(EVE, wave, mark, t_shift, spoint, npoint, ne, ns) private(i, j, k, s, n)
for (i = 0; i < ne; i++) {
for (k = 0; k < ns; k++) {
if (mark[k + i * ns] == 0) {
EVE[i].pSNR[k] = 0;
continue;
}
s = 0;
n = 0;
for (j = 0; j < spoint; j++)
s += wave[i * ns + k][j + npoint + t_shift] * wave[i * ns + k][j + npoint + t_shift];
for (j = 0; j < npoint; j++)
n += wave[i * ns + k][j + t_shift] * wave[i * ns + k][j + t_shift];
// ratio of mean signal power to mean noise power
EVE[i].pSNR[k] = (s / spoint) / (n / npoint);
}
}
#pragma omp barrier
}
// Compute the S-wave signal-to-noise ratio for every event/station
// trace as the average of the SNRs of the two horizontal components
// (was-second signal window after the pick over wbs-second noise window
// before it). Stored in EVE[i].sSNR[k]; traces flagged unusable in
// mark[] get SNR 0.
void Cal_sSNR(float** wave1, float** wave2, int* mark)
{
extern float delta, was, wbs;
extern int ns, ne;
int i, j, k;
// samples in one second: skips the 1-s pad at the front of each trace
int t_shift = (int)(1/delta);
double s1, n1, s2, n2; // per-component signal/noise power accumulators
int spoint, npoint;
extern EVENT* EVE;
spoint = (int)(was / delta - 0.5); // signal-window samples (after pick)
npoint = (int)(wbs / delta - 0.5); // noise-window samples (before pick)
#pragma omp parallel for shared(EVE, wave1, wave2, mark, t_shift, spoint, npoint, ne, ns) private(i, j, k, s1, s2, n1, n2)
for (i = 0; i < ne; i++) {
for (k = 0; k < ns; k++) {
if (mark[i * ns + k] == 0) {
EVE[i].sSNR[k] = 0;
continue;
}
s1 = 0;
n1 = 0;
s2 = 0;
n2 = 0;
for (j = 0; j < spoint; j++)
s1 += wave1[i * ns + k][j + npoint + t_shift] * wave1[i * ns + k][j + npoint + t_shift];
for (j = 0; j < npoint; j++)
n1 += wave1[i * ns + k][j + t_shift] * wave1[i * ns + k][j + t_shift];
for (j = 0; j < spoint; j++)
s2 += wave2[i * ns + k][j + npoint + t_shift] * wave2[i * ns + k][j + npoint + t_shift];
for (j = 0; j < npoint; j++)
n2 += wave2[i * ns + k][j + t_shift] * wave2[i * ns + k][j + t_shift];
// average of the two components' mean-power ratios
EVE[i].sSNR[k] = 0.5 * ((s1 / spoint) / (n1 / npoint) + (s2 / spoint) / (n2 / npoint));
}
}
#pragma omp barrier
}
// Overwrite the predicted arrival times of pick b in pair a with the
// observed pick times stored in PIN for the pair's two events, matched
// by station name and phase label. A pick with no match in PIN keeps
// its predicted time.
// (Cleanup: removed the unused local `j` and the unused `extern int np`.)
void Replace(PAIR* PT, PHASEINF* PIN, int a, int b)
{
    int i;
    extern int ns;
    // event1: copy the matching picked time into arr1
    for (i = 0; i < 2 * ns; i++) {
        if (strcmp(PT[a].pk[b].sta, PIN[PT[a].event1].pa[i].sta) == 0 && strcmp(PT[a].pk[b].phase, PIN[PT[a].event1].pa[i].phase) == 0) {
            PT[a].pk[b].arr1 = PIN[PT[a].event1].time[i];
            break;
        }
    }
    // event2: copy the matching picked time into arr2
    for (i = 0; i < 2 * ns; i++) {
        if (strcmp(PT[a].pk[b].sta, PIN[PT[a].event2].pa[i].sta) == 0 && strcmp(PT[a].pk[b].phase, PIN[PT[a].event2].pa[i].phase) == 0) {
            PT[a].pk[b].arr2 = PIN[PT[a].event2].time[i];
            break;
        }
    }
}
//hanning taper before bandpass filter
void taper(float* yarray, int nlen, float start, float end)
{
float ang, cs;
int m1, m2, m3, m4, m5;
int i, j, k, xi;
m1 = (int)(nlen * start + 0.5);
m2 = m1 + 1;
ang = 3.1415926 / (float)(m1);
for (i = 0; i <= m1; i++) {
xi = i;
cs = (1 - cos(xi * ang)) / 2.0;
yarray[i] = yarray[i] * cs;
}
m3 = (int)(nlen * end + 0.5);
m5 = nlen - m3 - 1;
m4 = m5 + 1;
ang = 3.1415926 / (float)(m3);
for (k = m2; k <= m5; k++) {
yarray[k] = yarray[k];
}
for (j = m4; j < nlen; j++) {
xi = j + 1 - nlen;
cs = (1 - cos(xi * ang)) / 2.0;
yarray[j] = yarray[j] * cs;
}
}
/* remove trend a*i + b */
/* Remove the least-squares straight line slope*i + intercept (fit over
 * the sample index i) from y[0..n-1] in place. Uses the closed forms
 * sum(i) = n(n-1)/2 and sum(i^2) = n(n-1)(2n-1)/6 for the normal
 * equations. */
void rtrend(float* y, int n)
{
    double sxy = 0., sy = 0.; /* sum of i*y[i], sum of y[i] */
    for (int i = 0; i < n; i++) {
        sxy += i * y[i];
        sy += y[i];
    }
    double sx = 0.5 * n * (n - 1);      /* sum of i   */
    double sxx = sx * (2 * n - 1) / 3.; /* sum of i*i */
    double det = sxx * n - sx * sx;
    double slope = (n * sxy - sx * sy) / det;
    double intercept = (sxx * sy - sx * sxy) / det;
    for (int i = 0; i < n; i++) {
        y[i] = y[i] - slope * i - intercept;
    }
}
//do bandpass filtering for templates and traces
// Pre-process one trace in place for cross-correlation: remove the
// mean, remove the linear trend, apply a Hann taper at both ends, then
// band-pass filter with a 4th-order, two-pass (zero-phase) Butterworth.
// yarray: samples (hd.npts long); hd: SAC header supplying npts/delta;
// low/high: band-pass corner frequencies passed to xapiir.
void bpcc(float* yarray, SACHEAD hd, float low, float high)
{
/* Local variables */
//float low, high;
double attenuation, transition_bandwidth;
int nlen;
//SACHEAD hd;
double delta_d;
int order;
//float *yarray;
int passes;
float total, sum, mean, taperb;
int j;
sum = 0.0;
//rmean
for (j = 0; j < hd.npts; j++) {
sum += yarray[j];
}
for (j = 0; j < hd.npts; j++) {
yarray[j] = yarray[j] - sum / hd.npts;
}
delta_d = hd.delta;
nlen = hd.npts;
//rtrend
rtrend(yarray, nlen);
/*taper function*/
//taper(yarray,hd.npts,0.05,0.05); //sac default hanning window 0.05
// shorter trace -> proportionally wider taper (1% vs 0.01%)
taperb = 0.0001;
if (hd.npts < 20000) {
taperb = 0.01;
}
taper(yarray, hd.npts, taperb, taperb);
passes = 2; // forward + reverse pass -> zero phase shift
order = 4;
transition_bandwidth = 0.0;
attenuation = 0.0;
xapiir(yarray, nlen, SAC_BUTTERWORTH, transition_bandwidth, attenuation, order, SAC_BANDPASS, low, high, delta_d, passes);
// write_sac("test.sac",hd,yarray);
// exit(-1);
}
|
exercice2.c |
#include <stdio.h>
/* OpenMP exercise: print 100 messages from a parallel loop; the
 * critical section serializes access to stdout.
 * Fix: the opening brace of the critical section sat on the pragma line
 * ("#pragma omp critical{"); a #pragma directive ends at the newline,
 * so the brace was swallowed and the function's braces no longer
 * balanced. The brace now starts the structured block on its own line. */
int main(void){
    #pragma omp parallel for
    for (int i = 0; i < 100; ++i){
        #pragma omp critical
        {
            printf("Je suis #%d\n", i);
        }
    }
    return 0;
}
|
GB_binop__div_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int8)
// A*D function (colscale): GB (_AxD__div_int8)
// D*A function (rowscale): GB (_DxB__div_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int8)
// C=scalar+B GB (_bind1st__div_int8)
// C=scalar+B' GB (_bind1st_tran__div_int8)
// C=A+scalar GB (_bind2nd__div_int8)
// C=A'+scalar GB (_bind2nd_tran__div_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B for the int8 DIV operator; all three matrices are dense.
// The loop itself lives in the included template, specialized here by
// the GB_* macros defined above. (Auto-generated file.)
void GB (_Cdense_ewise3_accum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B for the int8 DIV operator; all three matrices are dense.
// The loop itself lives in the included template. (Auto-generated file.)
void GB (_Cdense_ewise3_noaccum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with the
// int8 DIV operator. Returns GrB_NO_VALUE when this operator/type pair
// is compiled out (GB_DISABLE). (Auto-generated file.)
GrB_Info GB (_Cdense_accumB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C with the int8
// DIV operator. Returns GrB_NO_VALUE when this operator/type pair is
// compiled out (GB_DISABLE). (Auto-generated file; note the second
// return below is unreachable — a harmless artifact of the generator.)
GrB_Info GB (_Cdense_accumb__div_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using the
// int8 DIV operator. Returns GrB_NO_VALUE when compiled out.
// (Auto-generated file.)
GrB_Info GB (_AxD__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using the
// int8 DIV operator. Returns GrB_NO_VALUE when compiled out.
// (Auto-generated file.)
GrB_Info GB (_DxB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked) for the int8 DIV operator. When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries
// present in only one of A or B. Returns GrB_NO_VALUE when compiled
// out. (Auto-generated file.)
GrB_Info GB (_AaddB__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
// unwrap the typed scalars used for entries missing from A or B
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) for the int8 DIV operator,
// method 08: C is sparse/hypersparse. Returns GrB_NO_VALUE when
// compiled out. (Auto-generated file.)
GrB_Info GB (_AemultB_08__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B for the int8 DIV operator, method 02: A is
// sparse/hypersparse and B is bitmap/full. flipxy requests z=f(y,x);
// whether a flipped template is needed depends on GB_BINOP_FLIP.
// Returns GrB_NO_VALUE when compiled out. (Auto-generated file.)
GrB_Info GB (_AemultB_02__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B for the int8 DIV operator, method 04: M is
// sparse/hypersparse while A and B are bitmap/full. Returns
// GrB_NO_VALUE when compiled out. (Auto-generated file.)
GrB_Info GB (_AemultB_04__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult where the result C is held as a bitmap; the masked/unmasked
// variants are all handled inside GB_bitmap_emult_template.c.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t x = (*((int8_t *) x_input)) ;
    int64_t k ;
    // apply cij = x / bij to every entry present in B
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the B bitmap
        if (!GBB (Bb, k)) continue ;
        int8_t bij = GBX (Bx, k, false) ;
        // integer division via the GB_IDIV_SIGNED macro (8-bit variant)
        Cx [k] = GB_IDIV_SIGNED (x, bij, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t y = (*((int8_t *) y_input)) ;
    int64_t k ;
    // apply cij = aij / y to every entry present in A
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the A bitmap
        if (!GBB (Ab, k)) continue ;
        int8_t aij = GBX (Ax, k, false) ;
        // integer division via the GB_IDIV_SIGNED macro (8-bit variant)
        Cx [k] = GB_IDIV_SIGNED (aij, y, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = div (x, aij), used by GB_unop_transpose.c below
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \
}
GrB_Info GB (_bind1st_tran__div_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying the bound-scalar operator.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = div (aij, y), used by GB_unop_transpose.c below
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \
}
GrB_Info GB (_bind2nd_tran__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying the bound-scalar operator.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__eq_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint16)
// A*D function (colscale): GB (_AxD__eq_uint16)
// D*A function (rowscale): GB (_DxB__eq_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint16)
// C=scalar+B GB (_bind1st__eq_uint16)
// C=scalar+B' GB (_bind1st_tran__eq_uint16)
// C=A+scalar GB (_bind2nd__eq_uint16)
// C=A'+scalar GB (_bind2nd_tran__eq_uint16)
// C type: bool
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_UINT16 || GxB_NO_EQ_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for this operator: EQ is not in that family, so the generator
// emits no dense C+=A+B kernel for eq_uint16 (hence the (none) name).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B where all three matrices are dense; the loop is in the template.
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The accumulation body is compiled out (#if 0), so this kernel returns
// success without touching C.  NOTE(review): presumably disabled because
// the C type (bool) differs from the B type (uint16_t) — confirm with the
// generator (GB_CTYPE_IS_BTYPE is 0 above).
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Scalar-accumulation body is compiled out (#if 0); the kernel is a no-op
// for this operator.  NOTE(review): presumably disabled because C's type
// (bool) differs from the scalar type (uint16_t) — confirm with generator.
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D (column scale by a diagonal matrix D); loop in the template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool results for the eq operator
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
// C = D*B (row scale by a diagonal matrix D); loop in the template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds bool results for the eq operator
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__eq_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B; the add kernel
// itself is in GB_add_template.c, driven by this file's macros.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read by eWiseUnion; plain eWiseAdd leaves them unset
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
// free the ek_slicing workspaces declared above
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__eq_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// General eWiseMult (method 08) producing a sparse/hyper C; the kernel is
// in GB_emult_08_meta.c, driven by this file's macros.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult C<#M>=A.*B for z=(x==y) on uint16, A sparse/hyper, B
// bitmap/full.  GB_BINOP_FLIP is 0 for eq (commutative), so only the
// unflipped branch below is compiled in.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult C<M>=A.*B with M sparse/hyper and A, B bitmap/full.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__eq_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult where the result C is held as a bitmap; masked/unmasked
// variants are handled inside GB_bitmap_emult_template.c.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    const uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t k ;
    // apply cij = (x == bij) to every entry present in B
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the B bitmap
        if (!GBB (Bb, k)) continue ;
        uint16_t bij = GBX (Bx, k, false) ;
        Cx [k] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    const uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t k ;
    // apply cij = (aij == y) to every entry present in A
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the A bitmap
        if (!GBB (Ab, k)) continue ;
        uint16_t aij = GBX (Ax, k, false) ;
        Cx [k] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = (x == aij), used by GB_unop_transpose.c below
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying the bound-scalar operator.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = (aij == y), used by GB_unop_transpose.c below
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying the bound-scalar operator.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.
template<typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu> *s,
const T1 input,
const T2 output,
const int req,
const float normalize_factor);
template<typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu> *s,
const DType *input,
DType *output,
const int req,
const int N,
const int C,
const int H,
const int W,
const float mean_d0,
const float mean_d1,
const float mean_d2,
const float std_d0,
const float std_d1,
const float std_d2);
template<typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s,
const DType *out_grad,
DType *in_grad,
const int req,
const int N,
const int C,
const int H,
const int W,
const float std_d0,
const float std_d1,
const float std_d2);
#endif // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
// Shape inference for to_tensor: HWC input maps to CHW output, NHWC to NCHW.
// Returns false (defer inference) while the input shape is still unknown.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  CHECK((ishape.ndim() == 3) || (ishape.ndim() == 4))
      << "Input image must have shape (height, width, channels), or "
      << "(N, height, width, channels) but got " << ishape;
  if (ishape.ndim() == 4) {
    // batched: move the channel axis from last to second
    SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                       mxnet::TShape({ishape[0], ishape[3], ishape[1], ishape[2]}));
  } else {
    // single image: move the channel axis from last to first
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({ishape[2], ishape[0], ishape[1]}));
  }
  return true;
}
// Type inference for to_tensor: the output is always float32; inference
// succeeds once the input dtype is known (-1 means still unknown).
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int> *in_attrs,
                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  const int in_dtype = (*in_attrs)[0];
  return in_dtype != -1;
}
// Operator Implementation
// CPU kernel for to_tensor: converts one HWC image (or one batch item at
// offset `step`) to CHW float output, dividing each value by
// `normalize_factor`.  `length` is H*W and `channels` is C.
template<typename DType, int req>
inline void ToTensor(float* out_data, const DType* in_data,
const int length,
const int channels,
const float normalize_factor,
const int step) {
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (int c = 0; c < channels; ++c) {
for (int i = 0; i < length; ++i) {
// read HWC (i*channels + c), write CHW (c*length + i)
KERNEL_ASSIGN(out_data[step + c*length + i], req,
(in_data[step + i*channels + c]) / normalize_factor);
}
}
}
// Dispatch helper for the CPU to_tensor path: resolves the input dtype and
// write request at runtime, then invokes the typed ToTensor kernel.
inline void ToTensorImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channel,
const float normalize_factor,
const int step) {
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
float* output = outputs[0].dptr<float>();
DType* input = inputs[0].dptr<DType>();
ToTensor<DType, req_type>(output, input, length, channel,
normalize_factor, step);
});
});
}
// Forward pass of the to_tensor operator: HWC/NHWC uint-or-float image in,
// CHW/NCHW float32 out, values scaled by 1/255.  Dispatches to a CUDA
// kernel on GPU, otherwise to the OpenMP CPU implementation.
template<typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
// We do not use a temp buffer when performing the operation,
// hence this check is necessary.
CHECK_EQ(req[0], kWriteTo)
<< "`to_tensor` does not support inplace updates";
const float normalize_factor = 255.0f;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>
(s, input, output, req_type, normalize_factor);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>
(s, input, output, req_type, normalize_factor);
}
});
});
#else
LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
// 3D Input - (h, w, c)
const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
const int channel = static_cast<int>(inputs[0].shape_[2]);
const int step = 0;
ToTensorImpl(inputs, outputs, req, length,
channel, normalize_factor, step);
} else if (inputs[0].ndim() == 4) {
// 4D input (n, h, w, c): each batch item is converted independently
const int batch_size = inputs[0].shape_[0];
const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
const int channel = static_cast<int>(inputs[0].shape_[3]);
const int step = channel * length;
#pragma omp parallel for
for (auto n = 0; n < batch_size; ++n) {
ToTensorImpl(inputs, outputs, req, length, channel,
normalize_factor, n*step);
}
}
}
// Parameters of the image Normalize operator: per-channel mean and std.
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
// per-channel means subtracted from the input
mxnet::Tuple<float> mean;
// per-channel standard deviations used as divisors
mxnet::Tuple<float> std;
DMLC_DECLARE_PARAMETER(NormalizeParam) {
DMLC_DECLARE_FIELD(mean)
.set_default(mxnet::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f})
.describe("Sequence of means for each channel. "
"Default value is 0.");
DMLC_DECLARE_FIELD(std)
.set_default(mxnet::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f})
.describe("Sequence of standard deviations for each channel. "
"Default value is 1.");
}
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Shape inference for Normalize: the output shape equals the input shape.
// Validates CHW/NCHW layout with 1 or 3 channels and that mean/std carry
// either 1 element or one element per channel.
// Fix: the parameter declaration was corrupted by a mis-encoded "&param"
// ("¶m" mojibake), which does not compile; restored the reference.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector *in_attrs,
                             mxnet::ShapeVector *out_attrs) {
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  const auto& dshape = (*in_attrs)[0];
  if (!dshape.ndim()) return false;
  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
      << "Input tensor must have shape (channels, height, width), or "
      << "(N, channels, height, width), but got " << dshape;
  int nchannels = 0;
  if (dshape.ndim() == 3) {
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
        << "The first dimension of input tensor must be the channel dimension with "
        << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
        << "The second dimension of input tensor must be the channel dimension with "
        << "either 1 or 3 elements, but got input with shape " << dshape;
  }
  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
      << "Invalid mean for input with shape " << dshape
      << ". mean must have either 1 or " << nchannels
      << " elements, but got " << param.mean;
  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
      << "Invalid std for input with shape " << dshape
      << ". std must have either 1 or " << nchannels
      << " elements, but got " << param.std;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Type inference for Normalize: the op is dtype-preserving, so the type is
// propagated in both directions between input and output.
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
// inference succeeds once the dtype is known (-1 means still unknown)
return out_attrs->at(0) != -1;
}
// CPU kernel for Normalize on one image (or one batch item at offset
// `step`): out[step + c*length + i] = (in[step + c*length + i] - mean[c]) / std[c],
// where `length` is H*W and `channels` is C.
// Fix: mean/std are now taken by const reference — the original passed the
// vectors by value, copying both on every call (this kernel runs once per
// batch item inside an OpenMP loop).  Call sites are unchanged.
template<typename DType, int req>
inline void Normalize(DType* out_data,
                      const DType* in_data,
                      const int length,
                      const int channels,
                      const int step,
                      const std::vector<float> &mean,
                      const std::vector<float> &std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(out_data[step + c*length + i], req,
                    (in_data[step + c*length + i] - mean[c]) / std[c]);
    }
  }
}
// Dispatch helper for the CPU Normalize path: resolves dtype and write
// request at runtime, then invokes the typed Normalize kernel.
// NOTE(review): mean/std are passed by value here and again into
// Normalize, copying them per call — consider const& (verify call sites).
inline void NormalizeImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channels,
const int step,
const std::vector<float> mean,
const std::vector<float> std) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
Normalize<DType, req_type>(output, input, length, channels, step,
mean, std);
});
});
}
// Forward pass of the Normalize operator: per-channel (x - mean) / std on a
// CHW image or NCHW batch.  Dispatches to a CUDA kernel on GPU, otherwise
// to the OpenMP CPU implementation.
// Fix: the parameter declaration was corrupted by a mis-encoded "&param"
// ("¶m" mojibake), which does not compile; restored the reference.
template<typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Mean and Std can be 1 or 3D only: broadcast a scalar to all three
  // channel slots, otherwise copy the per-channel values.
  std::vector<float> mean(3);
  std::vector<float> std(3);
  if (param.mean.ndim() == 1) {
    mean[0] = mean[1] = mean[2] = param.mean[0];
  } else {
    mean[0] = param.mean[0];
    mean[1] = param.mean[1];
    mean[2] = param.mean[2];
  }
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *input = nullptr;
        DType *output = nullptr;
        if (inputs[0].ndim() == 3) {
          N = 1;
          C = static_cast<int>(inputs[0].shape_[0]);
          H = static_cast<int>(inputs[0].shape_[1]);
          W = static_cast<int>(inputs[0].shape_[2]);
          input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(inputs[0].shape_[0]);
          C = static_cast<int>(inputs[0].shape_[1]);
          H = static_cast<int>(inputs[0].shape_[2]);
          W = static_cast<int>(inputs[0].shape_[3]);
          input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeImplCUDA<DType>(s, input, output, req_type,
                                 N, C, H, W,
                                 mean[0], mean[1], mean[2],
                                 std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D input (c, h, w)
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[0]);
    const int step = 0;
    NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w): each batch item is normalized independently
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const int channel = static_cast<int>(inputs[0].shape_[1]);
    const int step = channel * length;
#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std);
    }
  }
}
// Backward function
// CPU backward kernel for Normalize: since out = (in - mean)/std, the
// gradient w.r.t. the input is out_grad * (1/std[c]) per channel.
// Fix: `std` is now taken by const reference — the original copied the
// vector on every call (once per batch item inside an OpenMP loop).
// Call sites are unchanged.
template<typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
                              DType* in_grad,
                              const int length,
                              const int channels,
                              const int step,
                              const std::vector<float> &std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
  #pragma omp parallel for
#else
  #pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(in_grad[step + c*length + i], req,
                    out_grad[step + c*length + i] * (1.0 / std[c]));
    }
  }
}
// Dispatch helper for the CPU Normalize backward path; inputs[0] holds
// out_grad and outputs[0] holds in_grad.
// NOTE(review): `std` is passed by value per call — consider const&.
inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs,
const std::vector<OpReqType> &req,
const int length,
const int channels,
const int step,
const std::vector<float> std
) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
DType* out_grad = inputs[0].dptr<DType>();
DType* in_grad = outputs[0].dptr<DType>();
NormalizeBackward<DType, req_type>(out_grad, in_grad, length,
channels, step, std);
});
});
}
// Backward pass of the Normalize operator.  inputs[0] is out_grad,
// inputs[1] is the forward input (used only for its shape); outputs[0] is
// in_grad.  Dispatches to a CUDA kernel on GPU, otherwise to the CPU path.
// Fix: the parameter declaration was corrupted by a mis-encoded "&param"
// ("¶m" mojibake), which does not compile; restored the reference.
template<typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Std can be 1 or 3D only: broadcast a scalar to all three channels.
  std::vector<float> std(3);
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }
  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];
  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType *in_grad = nullptr;
        DType *out_grad = nullptr;
        if (in_data.ndim() == 3) {
          N = 1;
          C = static_cast<int>(in_data.shape_[0]);
          H = static_cast<int>(in_data.shape_[1]);
          W = static_cast<int>(in_data.shape_[2]);
          out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(in_data.shape_[0]);
          C = static_cast<int>(in_data.shape_[1]);
          H = static_cast<int>(in_data.shape_[2]);
          W = static_cast<int>(in_data.shape_[3]);
          out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type,
                                         N, C, H, W,
                                         std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (in_data.ndim() == 3) {
    // 3D input (c, h, w)
    const int length = in_data.shape_[1] * in_data.shape_[2];
    const int channel = static_cast<int>(in_data.shape_[0]);
    const int step = 0;
    NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
  } else if (in_data.ndim() == 4) {
    // 4D input (n, c, h, w): each batch item is handled independently
    const int batch_size = in_data.shape_[0];
    const int length = in_data.shape_[2] * in_data.shape_[3];
    const int channel = static_cast<int>(in_data.shape_[1]);
    const int step = channel * length;
#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std);
    }
  }
}
// Convert a float to DType. The generic overload is a plain static_cast;
// the uint8_t specialization clamps to the representable range [0, 255]
// before the implicit narrowing conversion.
template<typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}

template<>
inline uint8_t saturate_cast(const float& src) {
  const float clamped = (src < 0.f) ? 0.f : ((src > 255.f) ? 255.f : src);
  return clamped;
}
// Shape-inference function for single-image augmentation ops.
// Requires a 3-D HWC input whose last dimension is 1 or 3 channels, and
// propagates the input shape unchanged to the output.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
                       mxnet::ShapeVector *in_attrs,
                       mxnet::ShapeVector *out_attrs) {
  mxnet::TShape& dshape = (*in_attrs)[0];
  CHECK_EQ(dshape.ndim(), 3)
      << "Input image must have shape (height, width, channels), but got " << dshape;
  auto nchannels = dshape[dshape.ndim()-1];
  CHECK(nchannels == 3 || nchannels == 1)
      << "The last dimension of input image must be the channel dimension with "
      << "either 1 or 3 elements, but got input with shape " << dshape;
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Flip `src` along `axis` into `dst` (same shape). The tensor is viewed as
// [head, mid, tail] with mid = shape[axis]; slices equidistant from the two
// ends of the axis are swapped. Works in place (src == dst) as well as
// out of place.
template<typename DType, int axis>
void FlipImpl(const mxnet::TShape &shape, DType *src, DType *dst) {
  int head = 1, mid = shape[axis], tail = 1;
  for (int i = 0; i < axis; ++i) head *= shape[i];
  for (int i = axis+1; i < shape.ndim(); ++i) tail *= shape[i];

  for (int i = 0; i < head; ++i) {
    for (int j = 0; j < (mid >> 1); ++j) {
      int idx1 = (i*mid + j) * tail;
      int idx2 = idx1 + (mid-(j << 1)-1) * tail;
      for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
        DType tmp = src[idx1];
        dst[idx1] = src[idx2];
        dst[idx2] = tmp;
      }
    }
    // Bug fix: for an out-of-place flip with odd `mid`, the centre slice is
    // its own mirror image and was previously never written to dst, leaving
    // that part of the output uninitialized.
    if (src != dst && (mid & 1)) {
      int idx = (i*mid + (mid >> 1)) * tail;
      for (int k = 0; k < tail; ++k, ++idx) {
        dst[idx] = src[idx];
      }
    }
  }
}
// Horizontal flip: mirrors axis 1 (width) of the HWC input into the output.
inline void FlipLeftRight(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// Vertical flip: mirrors axis 0 (height) of the HWC input into the output.
inline void FlipTopBottom(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                       outputs[0].dptr<DType>());
  });
}
// With probability 1/2 (default std::bernoulli_distribution) pass the input
// through unchanged, otherwise flip it horizontally. Uses the op's requested
// random resource for reproducibility with the engine's seed.
inline void RandomFlipLeftRight(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
      // identity branch: copy only when not already operating in place
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// With probability 1/2 (default std::bernoulli_distribution) pass the input
// through unchanged, otherwise flip it vertically (axis 0).
inline void RandomFlipTopBottom(
    const nnvm::NodeAttrs &attrs,
    const OpContext &ctx,
    const std::vector<TBlob> &inputs,
    const std::vector<OpReqType> &req,
    const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (std::bernoulli_distribution()(prnd->GetRndEngine())) {
      // identity branch: copy only when not already operating in place
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(),
                         outputs[0].dptr<DType>());
    }
  });
}
// Parameters shared by the random brightness/contrast/saturation/hue ops:
// the enhancement factor is sampled uniformly from [min_factor, max_factor].
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
  float min_factor;  // lower bound of the sampled factor (>= 0)
  float max_factor;  // upper bound of the sampled factor (>= 0)
  DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
    DMLC_DECLARE_FIELD(min_factor)
    .set_lower_bound(0.0)
    .describe("Minimum factor.");
    DMLC_DECLARE_FIELD(max_factor)
    .set_lower_bound(0.0)
    .describe("Maximum factor.");
  }
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Sample a brightness factor uniformly from [min_factor, max_factor] and
// apply it with AdjustBrightnessImpl.
inline void RandomBrightness(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  float alpha_b = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs);
}
// Contrast adjustment: out = alpha_c * in + (1 - alpha_c) * mean_gray, where
// mean_gray is the mean BT.601 luma (0.299 R + 0.587 G + 0.114 B) over the
// whole HWC image (plain mean for single-channel input).
// NOTE(review): the multi-channel branch assumes exactly 3 channels;
// float accumulation of `sum` may lose precision for very large images.
inline void AdjustContrastImpl(const float& alpha_c,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  static const float coef[] = { 0.299f, 0.587f, 0.114f };

  int length = inputs[0].shape_[0] * inputs[0].shape_[1];  // H * W pixels
  int nchannels = inputs[0].shape_[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    // mean gray level of the image
    float sum = 0.f;
    if (nchannels > 1) {
      for (int l = 0; l < length; ++l) {
        for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c];
      }
    } else {
      for (int l = 0; l < length; ++l) sum += input[l];
    }
    float gray_mean = sum / static_cast<float>(length);
    float beta = (1 - alpha_c) * gray_mean;

    // blend each element towards the mean gray level
    for (int l = 0; l < length * nchannels; ++l) {
      float val = input[l] * alpha_c + beta;
      output[l] = saturate_cast<DType>(val);
    }
  });
}
// Sample a contrast factor uniformly from [min_factor, max_factor] and
// apply it with AdjustContrastImpl.
inline void RandomContrast(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha_c = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs);
}
// Saturation adjustment: blend each pixel with its own gray value,
// out = alpha_s * in + (1 - alpha_s) * gray, where gray is the BT.601 luma
// (0.299 R + 0.587 G + 0.114 B). Single-channel input is copied through
// unchanged (saturation is undefined for grayscale).
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext &ctx,
                                 const std::vector<TBlob> &inputs,
                                 const std::vector<OpReqType> &req,
                                 const std::vector<TBlob> &outputs) {
  static const float coef[] = { 0.299f, 0.587f, 0.114f };

  int length = inputs[0].shape_[0] * inputs[0].shape_[1];  // H * W pixels
  int nchannels = inputs[0].shape_[2];

  float alpha_o = 1.f - alpha_s;  // weight of the gray component

  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();

    if (nchannels == 1) {
      for (int l = 0; l < length; ++l) output[l] = input[l];
      return;
    }

    for (int l = 0; l < length; ++l) {
      float gray = 0.f;
      for (int c = 0; c < 3; ++c) {
        // Bug fix: accumulate the weighted sum; the previous `=` discarded
        // the R and G contributions and kept only the blue term.
        gray += input[l*3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l*3 + c] * alpha_s;
        output[l*3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Sample a saturation factor uniformly from [min_factor, max_factor] and
// apply it with AdjustSaturationImpl.
inline void RandomSaturation(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha_s = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs);
}
// Convert one RGB pixel (components in [0, 255]) to HLS.
// Outputs: hue in degrees [0, 360), lightness and saturation in [0, 1].
// Achromatic pixels (max - min within float epsilon) get h = s = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float *dst_h,
                           float *dst_l,
                           float *dst_s) {
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;

  const float vmax = std::fmax(r, std::fmax(g, b));
  const float vmin = std::fmin(r, std::fmin(g, b));
  float diff = vmax - vmin;
  const float l = (vmax + vmin) * 0.5f;

  float h = 0.f;
  float s = 0.f;
  if (diff > std::numeric_limits<float>::epsilon()) {
    // saturation denominator depends on which half of the lightness range
    s = (l < 0.5f) ? diff / (vmax + vmin)
                   : diff / (2.0f - vmax - vmin);
    const float scale = 60.f / diff;
    if (vmax == r) {
      h = (g - b) * scale;
    } else if (vmax == g) {
      h = (b - r) * scale + 120.f;
    } else {
      h = (r - g) * scale + 240.f;
    }
    if (h < 0.f) {
      h += 360.f;
    }
  }

  *dst_h = h;
  *dst_l = l;
  *dst_s = s;
}
// Convert one HLS pixel back to RGB components in [0, 255].
// Hue is in degrees (any value; normalized into [0, 360)), lightness and
// saturation in [0, 1]. Zero saturation yields a gray pixel r = g = b = l.
inline void HLS2RGBConvert(const float& src_h,
                           const float& src_l,
                           const float& src_s,
                           float *dst_r,
                           float *dst_g,
                           float *dst_b) {
  // per-sector mapping of the tab[] values onto the b, g, r channels
  static const int c_HlsSectorData[6][3] = {
    { 1, 3, 0 },
    { 1, 0, 2 },
    { 3, 0, 1 },
    { 0, 2, 1 },
    { 0, 1, 3 },
    { 2, 1, 0 }
  };

  float h = src_h;
  const float l = src_l;
  const float s = src_s;
  float r = l, g = l, b = l;

  if (s != 0) {
    const float p2 = (l <= 0.5f) ? l * (1 + s) : l + s - l * s;
    const float p1 = 2 * l - p2;

    // normalize hue into [0, 6) sectors of 60 degrees each
    h *= 1.f / 60.f;
    while (h < 0) {
      h += 6;
    }
    while (h >= 6) {
      h -= 6;
    }

    const int sector = static_cast<int>(h);
    h -= sector;  // fractional position within the sector

    float tab[4];
    tab[0] = p2;
    tab[1] = p1;
    tab[2] = p1 + (p2 - p1) * (1 - h);
    tab[3] = p1 + (p2 - p1) * h;

    b = tab[c_HlsSectorData[sector][0]];
    g = tab[c_HlsSectorData[sector][1]];
    r = tab[c_HlsSectorData[sector][2]];
  }

  *dst_b = b * 255.f;
  *dst_g = g * 255.f;
  *dst_r = r * 255.f;
}
// Hue rotation: convert each RGB pixel to HLS, add alpha (in turns, i.e.
// alpha * 360 degrees) to the hue, and convert back. Single-channel images
// are skipped entirely (note: the output is then left untouched).
// Assumes interleaved 3-channel HWC layout when nchannels != 1.
inline void AdjustHueImpl(float alpha,
                          const OpContext &ctx,
                          const std::vector<TBlob> &inputs,
                          const std::vector<OpReqType> &req,
                          const std::vector<TBlob> &outputs) {
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];  // H * W pixels

  if (inputs[0].shape_[2] == 1) return;

  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* input = inputs[0].dptr<DType>();
    DType* output = outputs[0].dptr<DType>();

    for (int i = 0; i < length; ++i) {
      float h, l, s;
      float r = static_cast<float>(*(input++));
      float g = static_cast<float>(*(input++));
      float b = static_cast<float>(*(input++));
      RGB2HLSConvert(r, g, b, &h, &l, &s);
      h += alpha * 360.f;  // rotate hue; HLS2RGBConvert re-normalizes it
      HLS2RGBConvert(h, l, s, &r, &g, &b);
      *(output++) = saturate_cast<DType>(r);
      *(output++) = saturate_cast<DType>(g);
      *(output++) = saturate_cast<DType>(b);
    }
  });
}
// Sample a hue offset uniformly from [min_factor, max_factor] (in turns)
// and apply it with AdjustHueImpl.
inline void RandomHue(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  float alpha = std::uniform_real_distribution<float>(
      param.min_factor, param.max_factor)(prnd->GetRndEngine());
  AdjustHueImpl(alpha, ctx, inputs, req, outputs);
}
// Jitter magnitudes for the combined color-jitter op. A value of 0 disables
// the corresponding transform; otherwise the per-transform factor is sampled
// uniformly from [-value, value] around the identity.
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
  float brightness;  // brightness jitter magnitude
  float contrast;    // contrast jitter magnitude
  float saturation;  // saturation jitter magnitude
  float hue;         // hue jitter magnitude (in turns)
  DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
    DMLC_DECLARE_FIELD(brightness)
    .describe("How much to jitter brightness.");
    DMLC_DECLARE_FIELD(contrast)
    .describe("How much to jitter contrast.");
    DMLC_DECLARE_FIELD(saturation)
    .describe("How much to jitter saturation.");
    DMLC_DECLARE_FIELD(hue)
    .describe("How much to jitter hue.");
  }
};
// Apply brightness/contrast/saturation/hue jitter in a random order.
// Each enabled transform reads from `inputs` only for the first applied
// transform; afterwards `flag` is set and subsequent transforms read from
// `outputs`, chaining the adjustments in place.
inline void RandomColorJitter(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s);

  // random application order of the four transforms
  int order[4] = {0, 1, 2, 3};
  std::shuffle(order, order + 4, prnd->GetRndEngine());
  bool flag = false;  // true once the output buffer holds an intermediate result

  for (int i = 0; i < 4; ++i) {
    switch (order[i]) {
      case 0:
        if (param.brightness > 0) {
          float alpha_b = 1.0 + std::uniform_real_distribution<float>(
              -param.brightness, param.brightness)(prnd->GetRndEngine());
          AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 1:
        if (param.contrast > 0) {
          float alpha_c = 1.0 + std::uniform_real_distribution<float>(
              -param.contrast, param.contrast)(prnd->GetRndEngine());
          AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 2:
        if (param.saturation > 0) {
          float alpha_s = 1.f + std::uniform_real_distribution<float>(
              -param.saturation, param.saturation)(prnd->GetRndEngine());
          AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
      case 3:
        if (param.hue > 0) {
          // hue is an additive offset, not a factor around 1
          float alpha_h = std::uniform_real_distribution<float>(
              -param.hue, param.hue)(prnd->GetRndEngine());
          AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
          flag = true;
        }
        break;
    }
  }
}
// Parameter for the deterministic AlexNet-style PCA lighting adjustment:
// one alpha coefficient per color channel.
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
  mxnet::Tuple<float> alpha;  // PCA noise coefficients for R, G, B
  DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
    DMLC_DECLARE_FIELD(alpha)
    .describe("The lighting alphas for the R, G, B channels.");
  }
};
// Parameter for the randomized lighting adjustment: the per-channel alphas
// are drawn from a zero-mean normal distribution with this std deviation.
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
  float alpha_std;  // std deviation of the sampled lighting noise
  DMLC_DECLARE_PARAMETER(RandomLightingParam) {
    DMLC_DECLARE_FIELD(alpha_std)
    .set_default(0.05)
    .describe("Level of the lighting noise.");
  }
};
// PCA-based lighting augmentation (AlexNet style): add to every pixel a fixed
// RGB offset eig * alpha, where eig is a precomputed eigenvector/eigenvalue
// product of the RGB covariance (presumably from ImageNet statistics —
// TODO confirm provenance of the constants). Grayscale input is skipped
// (note: the output is then left untouched). Assumes 3-channel HWC layout.
inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
  static const float eig[3][3] = {
      { 55.46 * -0.5675, 4.794 * 0.7192,  1.148 * 0.4009 },
      { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 },
      { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 }
  };

  int length = inputs[0].shape_[0] * inputs[0].shape_[1];  // H * W pixels
  int channels = inputs[0].shape_[2];
  if (channels == 1) return;

  // per-channel offsets: eig . alpha
  float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];

  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();

    for (int i = 0; i < length; i++) {
      int base_ind = 3 * i;
      float in_r = static_cast<float>(input[base_ind]);
      float in_g = static_cast<float>(input[base_ind + 1]);
      float in_b = static_cast<float>(input[base_ind + 2]);
      output[base_ind] = saturate_cast<DType>(in_r + pca_r);
      output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
      output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
    }
  });
}
// Deterministic lighting adjustment using the user-provided alpha tuple.
inline void AdjustLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const AdjustLightingParam &param = nnvm::get<AdjustLightingParam>(attrs.parsed);
  AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs);
}
// Randomized lighting adjustment: draw one alpha per RGB channel from
// N(0, alpha_std) and delegate to AdjustLightingImpl.
inline void RandomLighting(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
  using namespace mshadow;
  const RandomLightingParam &param = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu> *s = ctx.get_stream<cpu>();
  Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s);
  std::normal_distribution<float> dist(0, param.alpha_std);
  float alpha_r = dist(prnd->GetRndEngine());
  float alpha_g = dist(prnd->GetRndEngine());
  float alpha_b = dist(prnd->GetRndEngine());
  AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs);
}
// Registers a deterministic image-augmentation op: one input, one output,
// in-place capable, HWC shape validated by ImageShape, identity gradient.
#define MXNET_REGISTER_IMAGE_AUG_OP(name)                                   \
  NNVM_REGISTER_OP(name)                                                    \
  .set_num_inputs(1)                                                        \
  .set_num_outputs(1)                                                       \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                         \
    [](const NodeAttrs& attrs){                                             \
      return std::vector<std::pair<int, int> >{{0, 0}};                     \
    })                                                                      \
  .set_attr<mxnet::FInferShape>("FInferShape", ImageShape)                  \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)             \
  .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" })   \
  .add_argument("data", "NDArray-or-Symbol", "The input.")

// Same as above but additionally requests the random resource, for the
// stochastic augmentation ops.
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name)                               \
  MXNET_REGISTER_IMAGE_AUG_OP(name)                                         \
  .set_attr<FResourceRequest>("FResourceRequest",                           \
    [](const NodeAttrs& attrs) {                                            \
      return std::vector<ResourceRequest>{ResourceRequest::kRandom};        \
    })
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
critical_flet.h | /*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
critical_flet.h
Abstract:
Version of flet using "omp critical" directive.
Warning: it uses omp critical section "critical_flet"
Author:
Leonardo de Moura (leonardo) 2011-05-12
Revision History:
--*/
#ifndef _CRITICAL_FLET_H_
#define _CRITICAL_FLET_H_
// RAII helper that temporarily sets a shared variable to a new value and
// restores the previous value when the object goes out of scope. Both the
// store and the restore are guarded by the named OpenMP critical section
// "critical_flet", so concurrent instances never interleave their updates.
// NOTE(review): the read in the initializer list (m_old_value(ref)) happens
// outside the critical section.
template<typename T>
class critical_flet {
    T & m_ref;        // variable being temporarily overridden
    T m_old_value;    // saved value, restored by the destructor
public:
    critical_flet(T & ref, const T & new_value):
        m_ref(ref),
        m_old_value(ref) {
        #pragma omp critical (critical_flet)
        {
            m_ref = new_value;
        }
    }
    ~critical_flet() {
        #pragma omp critical (critical_flet)
        {
            m_ref = m_old_value;
        }
    }
};
#endif
|
i3lock-fancy-rapid.c | /*
* BSD 3-Clause License
*
* Copyright (c) 2018-2019, The i3lock-fancy-rapid authors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <unistd.h>
#include <sys/wait.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <omp.h>
#include <string.h>
#include <math.h>
/*
 * Horizontal box blur of an RGB image using a sliding-window running sum:
 * each output pixel is the average of the (2*radius+1)-wide window, clipped
 * at the row borders (edge pixels average over fewer samples, so they are
 * slightly darkened). Rows are processed independently in parallel.
 * NOTE(review): dest must not alias src — the window reads positions already
 * overwritten at j-radius-1 if it did.
 */
void box_blur_h(unsigned char *dest, unsigned char *src, int height, int width,
                int radius)
{
    double coeff = 1.0 / (radius * 2 + 1);
#pragma omp parallel for
    for (int i = 0; i < height; ++i) {
        int iwidth = i * width;
        /* running window sums, pre-scaled by coeff */
        double r_acc = 0.0;
        double g_acc = 0.0;
        double b_acc = 0.0;
        /* j starts at -radius so the window is pre-filled before the first
         * output pixel is emitted at j == 0 */
        for (int j = -radius; j < width; ++j) {
            if (j - radius - 1 >= 0) {
                /* drop the pixel that just left the window */
                int index = (iwidth + j - radius - 1) * 3;
                r_acc -= coeff * src[index];
                g_acc -= coeff * src[index + 1];
                b_acc -= coeff * src[index + 2];
            }
            if (j + radius < width) {
                /* add the pixel that just entered the window */
                int index = (iwidth + j + radius) * 3;
                r_acc += coeff * src[index];
                g_acc += coeff * src[index + 1];
                b_acc += coeff * src[index + 2];
            }
            if (j < 0)
                continue;
            int index = (iwidth + j) * 3;
            /* +0.5 rounds to nearest on the implicit double->uchar truncation */
            dest[index] = r_acc + 0.5;
            dest[index + 1] = g_acc + 0.5;
            dest[index + 2] = b_acc + 0.5;
        }
    }
}
/* Transpose an RGB image: pixel (row, col) of src becomes pixel (col, row)
 * of dest. dest must not alias src. */
static inline void transpose(unsigned char *dest, unsigned char *src, int height, int width) {
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            unsigned char *from = src + 3 * (row * width + col);
            unsigned char *to = dest + 3 * (col * height + row);
            to[0] = from[0];
            to[1] = from[1];
            to[2] = from[2];
        }
    }
}
/*
 * Full 2-D box blur: apply the horizontal pass `times` times, transpose,
 * blur `times` times again (now blurring the former columns), and transpose
 * back. src is used as scratch and is clobbered; dest receives the result.
 */
void box_blur(unsigned char *dest, unsigned char *src, int height, int width,
              int radius, int times)
{
    /* Bug fix: with times == 0 the first loop never ran, so the transpose
     * below read the uninitialized dest buffer. Zero passes means "no blur":
     * just copy the input through. */
    if (times <= 0) {
        memcpy(dest, src, (size_t)height * width * 3);
        return;
    }
    for (int i = 0; i < times; ++i) {
        box_blur_h(dest, src, height, width, radius);
        memcpy(src, dest, height * width * 3);
    }
    transpose(src, dest, height, width);
    for (int i = 0; i < times; ++i) {
        box_blur_h(dest, src, width, height, radius);
        memcpy(src, dest, height * width * 3);
    }
    transpose(dest, src, width, height);
}
/*
 * In-place brightness adjustment. brightness is a percentage: 100 leaves the
 * image unchanged, below 100 scales every byte towards 0, above 100 scales
 * the distance to white (255) down, pulling every byte towards 255.
 */
void adjust(unsigned char *src, int height, int width, int brightness)
{
    const int len = height * width * 3;
    if (brightness < 100) {
        /* darken: scale each channel byte directly */
#pragma omp parallel for
        for (int idx = 0; idx < len; idx++)
            src[idx] = src[idx] * brightness / 100;
    } else if (brightness > 100) {
        /* brighten: shrink the headroom to 255 instead */
#pragma omp parallel for
        for (int idx = 0; idx < len; idx++) {
            int headroom = (255 - src[idx]) * 100 / brightness;
            src[idx] = 255 - headroom;
        }
    }
}
/*
 * Pixelation: replace each (2*radius+1)-sized square tile of src with its
 * mean color in dest. Two passes per tile: accumulate the channel sums,
 * then write the integer average back to every pixel of the tile. Tiles are
 * clipped at the right/bottom borders.
 */
void pixelate(unsigned char *dest, unsigned char *src, int height,
              int width, int radius)
{
    const int tile = radius * 2 + 1;
#pragma omp parallel for
    for (int ty = 0; ty < height; ty += tile) {
        for (int tx = 0; tx < width; tx += tile) {
            int count = 0;
            int sum_r = 0, sum_g = 0, sum_b = 0;
            /* pass 1: sum each channel over the (clipped) tile */
            for (int dy = 0; dy < tile && ty + dy < height; ++dy) {
                for (int dx = 0; dx < tile && tx + dx < width; ++dx) {
                    int at = ((ty + dy) * width + (tx + dx)) * 3;
                    ++count;
                    sum_r += src[at];
                    sum_g += src[at + 1];
                    sum_b += src[at + 2];
                }
            }
            sum_r /= count;
            sum_g /= count;
            sum_b /= count;
            /* pass 2: flood the tile with the average color */
            for (int dy = 0; dy < tile && ty + dy < height; ++dy) {
                for (int dx = 0; dx < tile && tx + dx < width; ++dx) {
                    int at = ((ty + dy) * width + (tx + dx)) * 3;
                    dest[at] = sum_r;
                    dest[at + 1] = sum_g;
                    dest[at + 2] = sum_b;
                }
            }
        }
    }
}
/*
 * Entry point: grab a screenshot of the X root window, blur or pixelate it,
 * adjust brightness, and pipe the raw RGB frame into i3lock via stdin.
 *
 * argv: radius times brightness [extra i3lock OPTIONS]
 *       pass "pixel" for times to pixelate instead of box-blurring.
 */
int main(int argc, char *argv[])
{
    if (argc < 4) {
        fprintf(stderr,
                "usage: %s radius times brightness [OPTIONS]\n"
                "pass \"pixel\" for times to get pixelation\n",
                argv[0]);
        exit(EXIT_FAILURE);
    }
    Display *display = XOpenDisplay(NULL);
    if (display == NULL) {
        /* previously unchecked: a NULL display crashed in XDefaultRootWindow */
        fprintf(stderr, "Cannot open X display!\n");
        exit(EXIT_FAILURE);
    }
    Window root = XDefaultRootWindow(display);
    XWindowAttributes gwa;
    XGetWindowAttributes(display, root, &gwa);
    int height = gwa.height;
    int width = gwa.width;
    unsigned char *preblur = malloc((size_t)height * width * 3);
    if (preblur == NULL) {
        fprintf(stderr, "Out of memory!\n");
        exit(EXIT_FAILURE);
    }
    XImage *image = XGetImage(display, root, 0, 0, width, height, AllPlanes,
                              ZPixmap);
    if (image == NULL) {
        fprintf(stderr, "Cannot capture the root window!\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < height; ++i) {
        int iwidth = i * width;
        for (int j = 0; j < width; ++j) {
            int index = (iwidth + j) * 3;
            unsigned long pixel = XGetPixel(image, j, i);
            /* NOTE(review): assumes the common 0xRRGGBB channel layout;
             * exotic visuals would need mask-relative shifts. */
            preblur[index] = (pixel & image->red_mask) >> 16;
            preblur[index + 1] = (pixel & image->green_mask) >> 8;
            preblur[index + 2] = pixel & image->blue_mask;
        }
    }
    XDestroyImage(image);
    /* Bug fix: the old XDestroyWindow(display, root) call was dropped —
     * DestroyWindow on the root window is defined as a no-op. */
    XCloseDisplay(display);
    unsigned char *postblur = malloc((size_t)height * width * 3);
    if (postblur == NULL) {
        fprintf(stderr, "Out of memory!\n");
        exit(EXIT_FAILURE);
    }
    int radius = atoi(argv[1]);
    if (radius < 0) {
        fprintf(stderr, "Radius has to be non-negative!\n");
        exit(EXIT_FAILURE);
    }
    if (strcmp(argv[2], "pixel") == 0) {
        pixelate(postblur, preblur, height, width, radius);
    } else {
        int times = atoi(argv[2]);
        if (times < 0) {
            fprintf(stderr, "Times has to be non-negative!\n");
            exit(EXIT_FAILURE);
        }
        box_blur(postblur, preblur, height, width, radius, times);
    }
    free(preblur);
    int brightness = atoi(argv[3]);
    if (brightness < 0) {
        fprintf(stderr, "Brightness has to be non-negative!\n");
        exit(EXIT_FAILURE);
    }
    adjust(postblur, height, width, brightness);
    int fds[2];
    if (pipe(fds) != 0) {
        perror("pipe");
        exit(EXIT_FAILURE);
    }
    pid_t pid = fork();
    if (pid < 0) {
        /* Bug fix: fork() failure (-1) previously fell into the parent
         * branch and waited for a child that never existed. */
        perror("fork");
        exit(EXIT_FAILURE);
    }
    if (pid > 0) {
        /* parent: feed the raw image to the child, then mirror its status */
        if (write(fds[1], postblur, (size_t)height * width * 3) < 0)
            perror("write");
        int status;
        wait(&status);
        exit(WEXITSTATUS(status));
    } else {
        /* child: read the raw RGB frame from stdin and exec i3lock */
        dup2(fds[0], STDIN_FILENO);
        char fmt[32];
        snprintf(fmt, sizeof(fmt), "%ix%i:rgb", width, height);
        char *new_argv[argc + 3];
        new_argv[0] = "i3lock";
        new_argv[1] = "-i";
        new_argv[2] = "/dev/stdin";
        new_argv[3] = "--raw";
        new_argv[4] = fmt;
        for (int i = 4; i < argc; ++i)
            new_argv[i + 2] = argv[i];
        new_argv[argc + 2] = NULL;
        execvp(new_argv[0], new_argv);
        exit(EXIT_FAILURE);
    }
}
|
nr_numint.c | /* Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <assert.h>
#include "config.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#define BOXSIZE 56
/*
 * Partition the AO (atomic orbital) index range into BOXSIZE-sized blocks
 * and mark each block empty (1) iff every shell whose functions fall in the
 * block is flagged zero in non0table. ao_loc maps shell id -> first AO index.
 * Returns nonzero if at least one block is empty (i.e. screening pays off),
 * 0 when any input pointer is NULL or no block is empty.
 */
int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
                       int *ao_loc)
{
        if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) {
                return 0;
        }

        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        int bas_id;
        int box_id = 0;
        int bound = BOXSIZE;
        int has0 = 0;
        empty[box_id] = 1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                /* a block stays empty only while every shell in it is zero */
                empty[box_id] &= !non0table[bas_id];
                if (ao_loc[bas_id] == bound) {
                        /* shell starts exactly on a block boundary */
                        has0 |= empty[box_id];
                        box_id++;
                        bound += BOXSIZE;
                        empty[box_id] = 1;
                } else if (ao_loc[bas_id] > bound) {
                        /* shell straddles the boundary: it belongs to the new
                         * block too, so seed the block with its flag */
                        has0 |= empty[box_id];
                        box_id++;
                        bound += BOXSIZE;
                        empty[box_id] = !non0table[bas_id];
                }
        }
        return has0;
}
/*
 * One grid block of vm[nocc,ngrids] = ao[nao,ngrids]^T-contracted dm[nao,nocc]
 * (Fortran/column-major via BLAS). When sparsity information is available,
 * only the non-empty BOXSIZE row-blocks of ao/dm are multiplied; the first
 * contributing dgemm overwrites vm (beta=0), later ones accumulate (beta=1).
 */
static void dot_ao_dm(double *vm, double *ao, double *dm,
                      int nao, int nocc, int ngrids, int bgrids,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D1 = 1;
        double beta = 0;
        if (has0) {
                int box_id, blen, i, j;
                size_t b0;
                for (box_id = 0; box_id < nbox; box_id++) {
                        if (!empty[box_id]) {
                                b0 = box_id * BOXSIZE;
                                blen = MIN(nao-b0, BOXSIZE);
                                dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
                                       &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
                                       &beta, vm, &ngrids);
                                beta = 1.0;
                        }
                }
                if (beta == 0) { // all empty: nothing was written, zero vm by hand
                        for (i = 0; i < nocc; i++) {
                                for (j = 0; j < bgrids; j++) {
                                        vm[i*ngrids+j] = 0;
                                }
                        }
                }
        } else {
                /* no screening info: single dense dgemm over all AOs */
                dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
                       &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
        }
}
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc]
 * Splits the grid dimension into BLKSIZE chunks and dispatches each chunk to
 * dot_ao_dm in parallel; non0table holds per-chunk shell screening flags. */
void VXCdot_ao_dm(double *vm, double *ao, double *dm,
                  int nao, int nocc, int ngrids, int nbas,
                  unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;

#pragma omp parallel
{
        int ip, ib;
#pragma omp for nowait schedule(static)
        for (ib = 0; ib < nblk; ib++) {
                ip = ib * BLKSIZE;
                dot_ao_dm(vm+ip, ao+ip, dm,
                          nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE),
                          non0table+ib*nbas, shls_slice, ao_loc);
        }
}
}
/* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */
/* vv[n,m] += ao1[n,ngrids] * ao2[m,ngrids] for one grid block.
 * With screening, only pairs of non-empty BOXSIZE blocks are multiplied;
 * when hermi is set only the lower-triangular block pairs (jb <= ib) are
 * computed — the caller fills the upper triangle afterwards. Note beta is
 * always 1: vv accumulates and must be pre-zeroed by the caller. */
static void dot_ao_ao(double *vv, double *ao1, double *ao2,
                      int nao, int ngrids, int bgrids, int hermi,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D1 = 1;
        if (has0) {
                int ib, jb, leni, lenj;
                int j1 = nbox;
                size_t b0i, b0j;
                for (ib = 0; ib < nbox; ib++) {
                if (!empty[ib]) {
                        b0i = ib * BOXSIZE;
                        leni = MIN(nao-b0i, BOXSIZE);
                        if (hermi) {
                                j1 = ib + 1;
                        }
                        for (jb = 0; jb < j1; jb++) {
                        if (!empty[jb]) {
                                b0j = jb * BOXSIZE;
                                lenj = MIN(nao-b0j, BOXSIZE);
                                dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1,
                                       ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids,
                                       &D1, vv+b0i*nao+b0j, &nao);
                        } }
                } }
        } else {
                /* no screening info: single dense accumulation */
                dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids,
                       &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao);
        }
}
/* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */
/* vv[nao,nao] = ao1[i,nao] * ao2[i,nao], summed over all grid points.
 * Each thread accumulates its grid blocks into a private buffer which is
 * then reduced into vv inside a critical section; with hermi set only the
 * lower triangle is computed and NPdsymm_triu mirrors it at the end. */
void VXCdot_ao_ao(double *vv, double *ao1, double *ao2,
                  int nao, int ngrids, int nbas, int hermi,
                  unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        size_t Nao = nao;
        NPdset0(vv, Nao * Nao);

#pragma omp parallel
{
        int ip, ib;
        size_t i;
        double *v_priv = calloc(Nao*Nao+2, sizeof(double));
        if (v_priv == NULL) {
                /* Bug fix: the allocation was never checked; a failed calloc
                 * would have crashed inside dgemm. Treat OOM as fatal. */
                abort();
        }
#pragma omp for nowait schedule(static)
        for (ib = 0; ib < nblk; ib++) {
                ip = ib * BLKSIZE;
                dot_ao_ao(v_priv, ao1+ip, ao2+ip,
                          nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
                          non0table+ib*nbas, shls_slice, ao_loc);
        }
#pragma omp critical
{
        /* Bug fix: the reduction index was a plain int, which overflows as
         * soon as Nao*Nao exceeds INT_MAX. */
        for (i = 0; i < Nao*Nao; i++) {
                vv[i] += v_priv[i];
        }
}
        free(v_priv);
}
        if (hermi != 0) {
                NPdsymm_triu(nao, vv, hermi);
        }
}
// 'nip,np->ip'
// 'nip,np->ip'
/* For each AO row i, contract the `comp` components of ao with the matching
 * weight vectors: aow[i,p] = sum_ic ao[ic,i,p] * wv[ic,p]. Component 0
 * initializes the output, components 1..comp-1 accumulate. */
void VXC_dscale_ao(double *aow, double *ao, double *wv,
                   int comp, int nao, int ngrids)
{
#pragma omp parallel
{
        size_t Ngrids = ngrids;
        size_t ao_size = nao * Ngrids;
        size_t g;
        int row, ic;
#pragma omp for schedule(static)
        for (row = 0; row < nao; row++) {
                double *src = ao + row * Ngrids;
                double *dst = aow + row * Ngrids;
                for (g = 0; g < Ngrids; g++) {
                        dst[g] = src[g] * wv[g];
                }
                for (ic = 1; ic < comp; ic++) {
                for (g = 0; g < Ngrids; g++) {
                        dst[g] += src[ic*ao_size+g] * wv[ic*Ngrids+g];
                } }
        }
}
}
// 'ip,ip->p'
// 'ip,ip->p'
/* rho[p] = sum_i bra[i,p] * ket[i,p]. The grid is cut into one contiguous
 * chunk per OpenMP thread; within a chunk the i=0 row initializes rho and
 * the remaining rows accumulate. */
void VXC_dcontract_rho(double *rho, double *bra, double *ket,
                       int nao, int ngrids)
{
#pragma omp parallel
{
        size_t Ngrids = ngrids;
        int nthread = omp_get_num_threads();
        int blksize = MAX((Ngrids+nthread-1) / nthread, 1);
        int chunk;
#pragma omp for
        for (chunk = 0; chunk < nthread; chunk++) {
                int start = chunk * blksize;
                int stop = MIN(start + blksize, ngrids);
                int i, g;
                for (g = start; g < stop; g++) {
                        rho[g] = bra[g] * ket[g];
                }
                for (i = 1; i < nao; i++) {
                for (g = start; g < stop; g++) {
                        rho[g] += bra[i*Ngrids+g] * ket[i*Ngrids+g];
                } }
        }
}
}
/* VV10 nonlocal correlation kernel sums. For every outer grid point i,
 * accumulate over all inner points j:
 *   g  = R2*W0[i] + K[i],  gp = R2*W0p[j] + Kp[j],  gt = g + gp
 *   F += RpW[j]/(g*gp*gt);  the U and W sums reuse the same term scaled by
 *   (1/g + 1/gt), with W additionally weighted by R2.
 * Outputs: Fvec[i] = -1.5 * F, Uvec[i] = U, Wvec[i] = W. */
void VXC_vv10nlc(double *Fvec, double *Uvec, double *Wvec,
                 double *vvcoords, double *coords,
                 double *W0p, double *W0, double *K, double *Kp, double *RpW,
                 int vvngrids, int ngrids)
{
#pragma omp parallel
{
        double dx, dy, dz, r2;
        double g_i, g_j, g_sum, term, f_acc, u_acc, w_acc;
        int i, j;
#pragma omp for schedule(static)
        for (i = 0; i < ngrids; i++) {
                f_acc = 0;
                u_acc = 0;
                w_acc = 0;
                for (j = 0; j < vvngrids; j++) {
                        dx = vvcoords[j*3+0] - coords[i*3+0];
                        dy = vvcoords[j*3+1] - coords[i*3+1];
                        dz = vvcoords[j*3+2] - coords[i*3+2];
                        r2 = dx*dx + dy*dy + dz*dz;
                        g_j = r2*W0p[j] + Kp[j];
                        g_i = r2*W0[i] + K[i];
                        g_sum = g_i + g_j;
                        term = RpW[j] / (g_i*g_j*g_sum);
                        f_acc += term;
                        term *= 1./g_i + 1./g_sum;
                        u_acc += term;
                        w_acc += term * r2;
                }
                Fvec[i] = f_acc * -1.5;
                Uvec[i] = u_acc;
                Wvec[i] = w_acc;
        }
}
}
|
kruskal.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "kruskal.h"
#include <math.h>
#include <omp.h>
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/*
 * Release the heap storage owned by a Kruskal (CP) factorization: the lambda
 * vector and each per-mode factor matrix. The splatt_kruskal struct itself is
 * NOT freed here — it remains owned by the caller.
 */
void splatt_free_kruskal(
    splatt_kruskal * factored)
{
  free(factored->lambda);
  for(idx_t m=0; m < factored->nmodes; ++m) {
    free(factored->factors[m]);
  }
}
/*
 * Predict a single tensor entry from a Kruskal (CP) model:
 *   est = sum_r lambda[r] * prod_m factors[m](coords[m], r)
 *
 * Returns SPLATT_ERROR_BADINPUT if any coordinate is out of bounds for its
 * mode, otherwise writes the estimate to *predicted and returns
 * SPLATT_SUCCESS.
 *
 * Fix over the previous version: the rank-length scratch buffer
 * (splatt_malloc/splatt_free on every call, with an unchecked allocation)
 * is removed; each rank-r component is accumulated directly, in the same
 * multiplication order as before (lambda first, then modes 0..nmodes-1)
 * and the same summation order (r ascending), so results are bitwise
 * unchanged while the hot path no longer touches the heap.
 */
int splatt_kruskal_predict(
    splatt_kruskal const * const factored,
    splatt_idx_t const * const coords,
    splatt_val_t * const predicted)
{
  /* check for out of bounds */
  for(idx_t m=0; m < factored->nmodes; ++m) {
    if(coords[m] >= factored->dims[m]) {
      return SPLATT_ERROR_BADINPUT;
    }
  }

  idx_t const nfactors = factored->rank;

  val_t est = 0;
  for(idx_t f=0; f < nfactors; ++f) {
    /* lambda(f) * A(i,f) * B(j,f) * ... for this rank-1 component */
    val_t component = factored->lambda[f];
    for(idx_t m=0; m < factored->nmodes; ++m) {
      component *= factored->factors[m][(coords[m] * nfactors) + f];
    }
    est += component;
  }

  *predicted = est;
  return SPLATT_SUCCESS;
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Fit of the current CP model against the tensor:
 *   fit = 1 - ||X - model|| / ||X||
 * using ||X - model||^2 = ||X||^2 + ||model||^2 - 2<X, model>. */
val_t kruskal_calc_fit(
  idx_t const nmodes,
  rank_info * const rinfo,
  thd_info * const thds,
  val_t const ttnormsq,
  val_t const * const restrict lambda,
  matrix_t ** mats,
  matrix_t const * const mttkrp,
  matrix_t ** aTa)
{
  timer_start(&timers[TIMER_FIT]);

  /* ||model||^2 = lambda^T * (hadamard of all aTa) * lambda */
  val_t const norm_mats = kruskal_norm(nmodes, lambda, aTa);

  /* <X, model>, reusing the last MTTKRP result */
  val_t const inner = kruskal_mttkrp_inner(nmodes, rinfo, thds, lambda, mats,
      mttkrp);

  val_t const resid_sq = ttnormsq + norm_mats - (2 * inner);
  val_t const residual = sqrt(resid_sq);

  timer_stop(&timers[TIMER_FIT]);

  /* relative fit in [.., 1]; 1 means a perfect reconstruction */
  return 1 - (residual / sqrt(ttnormsq));
}
/*
 * Compute the inner product <X, model> of the tensor with the CP model,
 * given m1 = MTTKRP(X, last mode):
 *   <X, model> = sum_{i,r} mats[lastm](i,r) * m1(i,r) * lambda[r]
 *
 * Each thread folds its rows into a rank-length scratch vector
 * (thds[tid].scratch[0]) before the final lambda-weighted reduction.
 * Under MPI the per-rank partial sums are combined with an Allreduce.
 */
val_t kruskal_mttkrp_inner(
  idx_t const nmodes,
  rank_info * const rinfo,
  thd_info * const thds,
  val_t const * const restrict lambda,
  matrix_t ** mats,
  matrix_t const * const m1)
{
  idx_t const rank = mats[0]->J;
  idx_t const lastm = nmodes - 1;
  idx_t const dim = m1->I;

  val_t const * const m0 = mats[lastm]->vals;
  val_t const * const mv = m1->vals;

  val_t myinner = 0;
  #pragma omp parallel reduction(+:myinner)
  {
    int const tid = omp_get_thread_num();
    /* rank-length accumulator in thread-private scratch space */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }

    /* accumF[r] += mats[lastm](i,r) * m1(i,r) over this thread's rows */
    #pragma omp for
    for(idx_t i=0; i < dim; ++i) {
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
      }
    }

    /* accumulate everything into 'myinner' */
    for(idx_t r=0; r < rank; ++r) {
      myinner += accumF[r] * lambda[r];
    }
  }

  val_t inner = 0.;
#ifdef SPLATT_USE_MPI
  timer_start(&timers[TIMER_MPI_FIT]);
  timer_start(&timers[TIMER_MPI_IDLE]);
  MPI_Barrier(rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_IDLE]);

  /* sum the per-rank partial inner products */
  MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_FIT]);
#else
  inner = myinner;
#endif
  return inner;
}
/* ||model||^2 = lambda^T * (hadamard product of all mode aTa matrices)
 * * lambda.  Only the upper triangle of the rank x rank matrices is
 * touched; aTa[MAX_NMODES] is clobbered as scratch space. */
val_t kruskal_norm(
  idx_t const nmodes,
  val_t const * const restrict lambda,
  matrix_t ** aTa)
{
  idx_t const rank = aTa[0]->J;
  val_t * const restrict scratch = aTa[MAX_NMODES]->vals;

  /* scratch(i,j) = 1 on the upper triangle */
  for(idx_t i=0; i < rank; ++i) {
    for(idx_t j=i; j < rank; ++j) {
      scratch[j + (i*rank)] = 1.;
    }
  }

  /* scratch = hadamard product of every mode's gram matrix */
  for(idx_t m=0; m < nmodes; ++m) {
    val_t const * const restrict mode_ata = aTa[m]->vals;
    for(idx_t i=0; i < rank; ++i) {
      for(idx_t j=i; j < rank; ++j) {
        scratch[j + (i*rank)] *= mode_ata[j + (i*rank)];
      }
    }
  }

  /* lambda^T * scratch * lambda, exploiting symmetry: off-diagonal
   * terms appear twice */
  val_t norm = 0;
  for(idx_t i=0; i < rank; ++i) {
    norm += scratch[i+(i*rank)] * lambda[i] * lambda[i];
    for(idx_t j=i+1; j < rank; ++j) {
      norm += scratch[j+(i*rank)] * lambda[i] * lambda[j] * 2;
    }
  }

  return fabs(norm);
}
|
GB_binop__max_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_int16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__max_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__max_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int16)
// A*D function (colscale): GB (_AxD__max_int16)
// D*A function (rowscale): GB (_DxB__max_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__max_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__max_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int16)
// C=scalar+B GB (_bind1st__max_int16)
// C=scalar+B' GB (_bind1st_tran__max_int16)
// C=A+scalar GB (_bind2nd__max_int16)
// C=A'+scalar GB (_bind2nd_tran__max_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT16 || GxB_NO_MAX_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the loop body comes from the
// shared template.  (Generated kernel: regenerate rather than hand-edit.)
void GB (_Cdense_ewise3_accum__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  (Generated kernel.)
GrB_Info GB (_Cdense_ewise3_noaccum__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  B is traversed
// using the precomputed slicing (B_ek_slicing / B_ntasks / B_nthreads).
GrB_Info GB (_Cdense_accumB__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__max_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated boilerplate)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D.
GrB_Info GB (_AxD__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values for the template
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D.
GrB_Info GB (_DxB__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values for the template
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with optional (possibly complemented)
// mask M.  (Generated kernel.)
GrB_Info GB (_AaddB__max_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slicing workspace for M, A, and B; released via GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult, method 01 (general case): C = A.*B or C<M> = A.*B.
GrB_Info GB (_AemultB_01__max_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP selects at compile time whether a flipped
// variant of the operator must be handled at run time.
GrB_Info GB (_AemultB_02__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 03: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; work is sliced over M (M_ek_slicing).
GrB_Info GB (_AemultB_03__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__max_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = max (x, Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__max_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = max (Ax [p], y) for every entry present in A.
GrB_Info GB (_bind2nd__max_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = GB_IMAX (x, aij) ;    \
}

// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__max_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = GB_IMAX (aij, y) ;    \
}

// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__max_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
image_random-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file image_random-inl.h
* \brief
* \author
*/
#ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>
#include "mxnet/base.h"
#include "../mxnet_op.h"
#include "../operator_common.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
// NOTE: Kernel launch/map was extremely costly.
// Hence, we use separate CUDA kernels for these operators.
template <typename DType, typename T1, typename T2>
void ToTensorImplCUDA(mshadow::Stream<gpu>* s,
const T1 input,
const T2 output,
const int req,
const float normalize_factor);
template <typename DType>
void NormalizeImplCUDA(mshadow::Stream<gpu>* s,
const DType* input,
DType* output,
const int req,
const int N,
const int C,
const int H,
const int W,
const float mean_d0,
const float mean_d1,
const float mean_d2,
const float std_d0,
const float std_d1,
const float std_d2);
template <typename DType>
void NormalizeBackwardImplCUDA(mshadow::Stream<gpu>* s,
const DType* out_grad,
DType* in_grad,
const int req,
const int N,
const int C,
const int H,
const int W,
const float std_d0,
const float std_d1,
const float std_d2);
#endif // MXNET_USE_CUDA
// Shape and Type inference for image to tensor operator
// Shape inference for to_tensor: HWC -> CHW, or NHWC -> NCHW.
inline bool ToTensorShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector* in_attrs,
                          mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);

  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape))
    return false;

  CHECK((ishape.ndim() == 3) || (ishape.ndim() == 4))
      << "Input image must have shape (height, width, channels), or "
      << "(N, height, width, channels) but got " << ishape;
  if (ishape.ndim() == 4) {
    // batched: move channels from last to second position
    SHAPE_ASSIGN_CHECK(*out_attrs, 0,
                       mxnet::TShape({ishape[0], ishape[3], ishape[1], ishape[2]}));
  } else {
    // single image: channels become the leading axis
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({ishape[2], ishape[0], ishape[1]}));
  }
  return true;
}
// Type inference for to_tensor: output is always float32; succeed once the
// input dtype is known.
inline bool ToTensorType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
  const bool input_known = ((*in_attrs)[0] != -1);
  return input_known;
}
// Operator Implementation
// Convert one image from interleaved HWC layout to planar CHW float output,
// dividing each value by normalize_factor.  `length` is H*W and `step`
// offsets both buffers to the n-th image of a batch (0 for a single image).
template <typename DType, int req>
inline void ToTensor(float* out_data,
                     const DType* in_data,
                     const int length,
                     const int channels,
                     const float normalize_factor,
                     const int step) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      // read pixel i, channel c from HWC; write into plane c of CHW
      KERNEL_ASSIGN(out_data[step + c * length + i],
                    req,
                    (in_data[step + i * channels + c]) / normalize_factor);
    }
  }
}
// Dispatch the CPU ToTensor kernel over the input dtype and the write
// request type (req).
inline void ToTensorImpl(const std::vector<TBlob>& inputs,
                         const std::vector<TBlob>& outputs,
                         const std::vector<OpReqType>& req,
                         const int length,
                         const int channel,
                         const float normalize_factor,
                         const int step) {
  MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      float* output = outputs[0].dptr<float>();
      DType* input = inputs[0].dptr<DType>();
      ToTensor<DType, req_type>(output, input, length, channel, normalize_factor, step);
    });
  });
}
// Forward pass of to_tensor: HWC (or NHWC) integer-like image to CHW (or
// NCHW) float, scaled by 1/255.  Dispatches to a CUDA kernel on GPU, or to
// ToTensorImpl on CPU (4D input is batched over an omp loop).
template <typename xpu>
void ToTensorOpForward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  // We do not use a temp buffer when performing the operation.
  // Hence, this check is necessary.
  CHECK_EQ(req[0], kWriteTo) << "`to_tensor` does not support inplace updates";

  const float normalize_factor = 255.0f;

  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        if (inputs[0].ndim() == 3) {
          Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
          Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>>(
              s, input, output, req_type, normalize_factor);
        } else {
          Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
          Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s);
          ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>>(
              s, input, output, req_type, normalize_factor);
        }
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D Input - (h, w, c)
    const int length = inputs[0].shape_[0] * inputs[0].shape_[1];
    const int channel = static_cast<int>(inputs[0].shape_[2]);
    const int step = 0;
    ToTensorImpl(inputs, outputs, req, length, channel, normalize_factor, step);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, h, w, c): process each image independently
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[3]);
    const int step = channel * length;

#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      ToTensorImpl(inputs, outputs, req, length, channel, normalize_factor, n * step);
    }
  }
}
// Parameters of the image Normalize operator: per-channel mean and std.
struct NormalizeParam : public dmlc::Parameter<NormalizeParam> {
  mxnet::Tuple<float> mean;  // value subtracted from each channel
  mxnet::Tuple<float> std;   // value each channel is divided by
  DMLC_DECLARE_PARAMETER(NormalizeParam) {
    DMLC_DECLARE_FIELD(mean)
        .set_default(mxnet::Tuple<float>{0.0f, 0.0f, 0.0f, 0.0f})
        .describe(
            "Sequence of means for each channel. "
            "Default value is 0.");
    DMLC_DECLARE_FIELD(std)
        .set_default(mxnet::Tuple<float>{1.0f, 1.0f, 1.0f, 1.0f})
        .describe(
            "Sequence of standard deviations for each channel. "
            "Default value is 1.");
  }
};
// Shape and Type inference for image Normalize operator
// Shape inference
// Shape inference for Normalize: accept CHW or NCHW with 1 or 3 channels,
// require mean/std to have 1 element or one element per channel, and pass
// the input shape through unchanged.
inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs) {
  const NormalizeParam& param = nnvm::get<NormalizeParam>(attrs.parsed);

  const auto& dshape = (*in_attrs)[0];
  if (!dshape.ndim())
    return false;

  CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4))
      << "Input tensor must have shape (channels, height, width), or "
      << "(N, channels, height, width), but got " << dshape;

  int nchannels = 0;
  if (dshape.ndim() == 3) {
    nchannels = dshape[0];
    CHECK(nchannels == 3 || nchannels == 1)
        << "The first dimension of input tensor must be the channel dimension with "
        << "either 1 or 3 elements, but got input with shape " << dshape;
  } else if (dshape.ndim() == 4) {
    nchannels = dshape[1];
    CHECK(nchannels == 3 || nchannels == 1)
        << "The second dimension of input tensor must be the channel dimension with "
        << "either 1 or 3 elements, but got input with shape " << dshape;
  }

  CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels))
      << "Invalid mean for input with shape " << dshape << ". mean must have either 1 or "
      << nchannels << " elements, but got " << param.mean;
  CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels)
      << "Invalid std for input with shape " << dshape << ". std must have either 1 or "
      << nchannels << " elements, but got " << param.std;

  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Type Inference
// Type inference for Normalize: the output dtype equals the input dtype;
// propagate in both directions so either side can pin the other.
inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs,
                            std::vector<int>* in_attrs,
                            std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);

  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
// CPU kernel: out[c][i] = (in[c][i] - mean[c]) / std[c] over a CHW image.
// `length` is H*W and `step` offsets both buffers to the n-th image of a
// batch (0 for a single image).
template <typename DType, int req>
inline void Normalize(DType* out_data,
                      const DType* in_data,
                      const int length,
                      const int channels,
                      const int step,
                      const std::vector<float> mean,
                      const std::vector<float> std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(out_data[step + c * length + i],
                    req,
                    (in_data[step + c * length + i] - mean[c]) / std[c]);
    }
  }
}
// Dispatch the CPU Normalize kernel over the data dtype and the write
// request type (req).
inline void NormalizeImpl(const std::vector<TBlob>& inputs,
                          const std::vector<TBlob>& outputs,
                          const std::vector<OpReqType>& req,
                          const int length,
                          const int channels,
                          const int step,
                          const std::vector<float> mean,
                          const std::vector<float> std) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* input = inputs[0].dptr<DType>();
      DType* output = outputs[0].dptr<DType>();
      Normalize<DType, req_type>(output, input, length, channels, step, mean, std);
    });
  });
}
// Forward pass of Normalize: out = (in - mean) / std per channel.
// Dispatches to a CUDA kernel on GPU, or to NormalizeImpl on CPU (4D input
// is batched over an omp loop).
template <typename xpu>
void NormalizeOpForward(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  const NormalizeParam& param = nnvm::get<NormalizeParam>(attrs.parsed);

  // Mean and Std can be 1 or 3D only.
  // A single value is broadcast to all three channel slots.
  std::vector<float> mean(3);
  std::vector<float> std(3);
  if (param.mean.ndim() == 1) {
    mean[0] = mean[1] = mean[2] = param.mean[0];
  } else {
    mean[0] = param.mean[0];
    mean[1] = param.mean[1];
    mean[2] = param.mean[2];
  }

  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }

  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType* input = nullptr;
        DType* output = nullptr;
        if (inputs[0].ndim() == 3) {
          // single image: treat as a batch of one
          N = 1;
          C = static_cast<int>(inputs[0].shape_[0]);
          H = static_cast<int>(inputs[0].shape_[1]);
          W = static_cast<int>(inputs[0].shape_[2]);
          input = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(inputs[0].shape_[0]);
          C = static_cast<int>(inputs[0].shape_[1]);
          H = static_cast<int>(inputs[0].shape_[2]);
          W = static_cast<int>(inputs[0].shape_[3]);
          input = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          output = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeImplCUDA<DType>(s,
                                 input,
                                 output,
                                 req_type,
                                 N,
                                 C,
                                 H,
                                 W,
                                 mean[0],
                                 mean[1],
                                 mean[2],
                                 std[0],
                                 std[1],
                                 std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (inputs[0].ndim() == 3) {
    // 3D input (c, h, w)
    const int length = inputs[0].shape_[1] * inputs[0].shape_[2];
    const int channel = static_cast<int>(inputs[0].shape_[0]);
    const int step = 0;
    NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std);
  } else if (inputs[0].ndim() == 4) {
    // 4D input (n, c, h, w): normalize each image independently
    const int batch_size = inputs[0].shape_[0];
    const int length = inputs[0].shape_[2] * inputs[0].shape_[3];
    const int channel = static_cast<int>(inputs[0].shape_[1]);
    const int step = channel * length;

#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeImpl(inputs, outputs, req, length, channel, n * step, mean, std);
    }
  }
}
// Backward function
// CPU backward kernel for Normalize: in_grad[c][i] = out_grad[c][i] / std[c]
// (the mean subtraction has zero gradient).  `step` offsets to the n-th
// image of a batch.
template <typename DType, int req>
inline void NormalizeBackward(const DType* out_grad,
                              DType* in_grad,
                              const int length,
                              const int channels,
                              const int step,
                              const std::vector<float> std) {
  // Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (int c = 0; c < channels; ++c) {
    for (int i = 0; i < length; ++i) {
      KERNEL_ASSIGN(
          in_grad[step + c * length + i], req, out_grad[step + c * length + i] * (1.0 / std[c]));
    }
  }
}
// Dispatch the CPU NormalizeBackward kernel over the data dtype and the
// write request type (req).  inputs[0] holds the incoming gradient.
inline void NormalizeBackwardImpl(const std::vector<TBlob>& inputs,
                                  const std::vector<TBlob>& outputs,
                                  const std::vector<OpReqType>& req,
                                  const int length,
                                  const int channels,
                                  const int step,
                                  const std::vector<float> std) {
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      DType* out_grad = inputs[0].dptr<DType>();
      DType* in_grad = outputs[0].dptr<DType>();
      NormalizeBackward<DType, req_type>(out_grad, in_grad, length, channels, step, std);
    });
  });
}
// Backward pass of Normalize: in_grad = out_grad / std per channel.
// inputs[0] is the output gradient; inputs[1] is the forward input (used
// only to recover the data layout).  Dispatches to a CUDA kernel on GPU,
// or to NormalizeBackwardImpl on CPU (4D input is batched over omp).
template <typename xpu>
void NormalizeOpBackward(const nnvm::NodeAttrs& attrs,
                         const OpContext& ctx,
                         const std::vector<TBlob>& inputs,
                         const std::vector<OpReqType>& req,
                         const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);

  const NormalizeParam& param = nnvm::get<NormalizeParam>(attrs.parsed);
  // Std can be 1 or 3D only.  Broadcast a single value to all channels.
  std::vector<float> std(3);
  if (param.std.ndim() == 1) {
    std[0] = std[1] = std[2] = param.std[0];
  } else {
    std[0] = param.std[0];
    std[1] = param.std[1];
    std[2] = param.std[2];
  }

  // Note: inputs[0] is out_grad
  const TBlob& in_data = inputs[1];

  if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
    mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
        int N, C, H, W;
        DType* in_grad = nullptr;
        DType* out_grad = nullptr;
        if (in_data.ndim() == 3) {
          // single image: treat as a batch of one
          N = 1;
          C = static_cast<int>(in_data.shape_[0]);
          H = static_cast<int>(in_data.shape_[1]);
          W = static_cast<int>(in_data.shape_[2]);
          out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_;
        } else {
          N = static_cast<int>(in_data.shape_[0]);
          C = static_cast<int>(in_data.shape_[1]);
          H = static_cast<int>(in_data.shape_[2]);
          W = static_cast<int>(in_data.shape_[3]);
          out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_;
          in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_;
        }
        NormalizeBackwardImplCUDA<DType>(
            s, out_grad, in_grad, req_type, N, C, H, W, std[0], std[1], std[2]);
      });
    });
#else
    LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU.";
#endif  // MXNET_USE_CUDA
  } else if (in_data.ndim() == 3) {
    // 3D input (c, h, w)
    const int length = in_data.shape_[1] * in_data.shape_[2];
    const int channel = static_cast<int>(in_data.shape_[0]);
    const int step = 0;
    NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std);
  } else if (in_data.ndim() == 4) {
    // 4D input (n, c, h, w): each image handled independently
    const int batch_size = in_data.shape_[0];
    const int length = in_data.shape_[2] * in_data.shape_[3];
    const int channel = static_cast<int>(in_data.shape_[1]);
    const int step = channel * length;

#pragma omp parallel for
    for (auto n = 0; n < batch_size; ++n) {
      NormalizeBackwardImpl(inputs, outputs, req, length, channel, n * step, std);
    }
  }
}
// Generic conversion from float to a pixel type: a plain cast, no clamping.
template <typename DType>
inline DType saturate_cast(const float& src) {
  return static_cast<DType>(src);
}

// uint8_t specialization: clamp into the byte range [0, 255] before the
// narrowing conversion so out-of-range values saturate instead of wrapping.
template <>
inline uint8_t saturate_cast(const float& src) {
  const float clamped = std::min(std::max(src, 0.f), 255.f);
  return static_cast<uint8_t>(clamped);
}
// Shape-inference function for single-image ops: validates that the input is
// a 3-D (H, W, C) tensor whose channel dimension is 1 or 3, then forwards
// the same shape to the output.
inline bool ImageShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
mxnet::TShape& dshape = (*in_attrs)[0];
CHECK_EQ(dshape.ndim(), 3) << "Input image must have shape (height, width, channels), but got "
<< dshape;
// Channels are last (HWC layout).
auto nchannels = dshape[dshape.ndim() - 1];
CHECK(nchannels == 3 || nchannels == 1)
<< "The last dimension of input image must be the channel dimension with "
<< "either 1 or 3 elements, but got input with shape " << dshape;
// Output shape is identical to the input shape.
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
// Reverse `src` along `axis`, writing into `dst`.  The tensor is viewed as
// (head, mid, tail): head = product of dims before axis, mid = shape[axis],
// tail = product of dims after axis.  Element j of each head-slice is swapped
// with element mid-1-j.  Works in place (src == dst) or out of place.
template <typename DType, int axis>
void FlipImpl(const mxnet::TShape& shape, DType* src, DType* dst) {
int head = 1, mid = shape[axis], tail = 1;
for (int i = 0; i < axis; ++i)
head *= shape[i];
for (int i = axis + 1; i < shape.ndim(); ++i)
tail *= shape[i];
for (int i = 0; i < head; ++i) {
// In place: iterate only the first half (the middle element of an odd-sized
// axis stays where it is).  Out of place: round up so the middle element is
// still copied into dst.
int mid2 = (src == dst) ? mid >> 1 : (mid + 1) >> 1;
for (int j = 0; j < mid2; ++j) {
// idx1/idx2 are the mirrored positions within this head-slice.
int idx1 = (i * mid + j) * tail;
int idx2 = idx1 + (mid - (j << 1) - 1) * tail;
for (int k = 0; k < tail; ++k, ++idx1, ++idx2) {
DType tmp = src[idx1];
dst[idx1] = src[idx2];
dst[idx2] = tmp;
}
}
}
}
// Horizontal flip of an HWC image: reverse axis 1 (width).
inline void FlipLeftRight(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const TBlob& src = inputs[0];
  const TBlob& dst = outputs[0];
  MSHADOW_TYPE_SWITCH(dst.type_flag_, DType, {
    FlipImpl<DType, 1>(src.shape_, src.dptr<DType>(), dst.dptr<DType>());
  });
}
// Vertical flip of an HWC image: reverse axis 0 (height).
inline void FlipTopBottom(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const TBlob& src = inputs[0];
  const TBlob& dst = outputs[0];
  MSHADOW_TYPE_SWITCH(dst.type_flag_, DType, {
    FlipImpl<DType, 0>(src.shape_, src.dptr<DType>(), dst.dptr<DType>());
  });
}
// Parameter for the random-flip ops: p is the probability that the image is
// flipped (default 0.5).
struct RandomFlipParam : public dmlc::Parameter<RandomFlipParam> {
  float p;  // flip probability in [0, 1]
  DMLC_DECLARE_PARAMETER(RandomFlipParam) {
    // Fix typo in user-facing help text: "probablity" -> "probability".
    DMLC_DECLARE_FIELD(p).set_default(0.5f).describe("The probability of flipping the image.");
  }
};
// Flip the image horizontally with probability param.p; otherwise copy the
// input through unchanged.
inline void RandomFlipLeftRight(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomFlipParam& param = nnvm::get<RandomFlipParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, float>(s);
  // Draw from U[0,1): P(sample > p) == 1 - p, so the flip branch runs with
  // probability p as documented.  (The previous normal(0,1) draw made the
  // effective flip probability depend incorrectly on p.)
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (dist(prnd->GetRndEngine()) > param.p) {
      // No flip: copy only when the op is not already in place.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>());
    }
  });
}
// Flip the image vertically with probability param.p; otherwise copy the
// input through unchanged.
inline void RandomFlipTopBottom(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomFlipParam& param = nnvm::get<RandomFlipParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, float>(s);
  // Draw from U[0,1): P(sample > p) == 1 - p, so the flip branch runs with
  // probability p as documented.  (The previous normal(0,1) draw made the
  // effective flip probability depend incorrectly on p.)
  std::uniform_real_distribution<float> dist(0.f, 1.f);
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    if (dist(prnd->GetRndEngine()) > param.p) {
      // No flip: copy only when the op is not already in place.
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    } else {
      FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>());
    }
  });
}
// Shared parameters for the random enhancement ops (brightness, contrast,
// saturation, hue): the enhancement factor is sampled uniformly from
// [min_factor, max_factor].
struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> {
float min_factor;  // lower bound of the sampled factor (>= 0)
float max_factor;  // upper bound of the sampled factor (>= 0)
DMLC_DECLARE_PARAMETER(RandomEnhanceParam) {
DMLC_DECLARE_FIELD(min_factor).set_lower_bound(0.0).describe("Minimum factor.");
DMLC_DECLARE_FIELD(max_factor).set_lower_bound(0.0).describe("Maximum factor.");
}
};
inline void AdjustBrightnessImpl(const float& alpha_b,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
int length = inputs[0].Size();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
for (int l = 0; l < length; ++l) {
float val = static_cast<float>(input[l]) * alpha_b;
output[l] = saturate_cast<DType>(val);
}
});
}
// Sample a brightness factor uniformly from [min_factor, max_factor] and
// delegate to AdjustBrightnessImpl.
inline void RandomBrightness(const nnvm::NodeAttrs& attrs,
                             const OpContext& ctx,
                             const std::vector<TBlob>& inputs,
                             const std::vector<OpReqType>& req,
                             const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomEnhanceParam& param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, float>(s);
  std::uniform_real_distribution<float> factor_dist(param.min_factor, param.max_factor);
  const float alpha_b = factor_dist(prnd->GetRndEngine());
  AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs);
}
// Blend each element with the mean gray intensity of the whole image:
// out = in * alpha_c + (1 - alpha_c) * mean_gray.  alpha_c > 1 increases
// contrast, alpha_c < 1 reduces it, alpha_c == 0 yields a flat gray image.
inline void AdjustContrastImpl(const float& alpha_c,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
// ITU-R BT.601 luma coefficients for R, G, B.
static const float coef[] = {0.299f, 0.587f, 0.114f};
// HWC layout: length = H*W pixels, nchannels = C (1 or 3).
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
int nchannels = inputs[0].shape_[2];
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* output = outputs[0].dptr<DType>();
DType* input = inputs[0].dptr<DType>();
float sum = 0.f;
if (nchannels > 1) {
// RGB: accumulate the luma of every pixel.
for (int l = 0; l < length; ++l) {
for (int c = 0; c < 3; ++c)
sum += input[l * 3 + c] * coef[c];
}
} else {
// Grayscale: the stored value is already the intensity.
for (int l = 0; l < length; ++l)
sum += input[l];
}
float gray_mean = sum / static_cast<float>(length);
// beta is the constant offset that pulls every value toward the mean.
float beta = (1 - alpha_c) * gray_mean;
for (int l = 0; l < length * nchannels; ++l) {
float val = input[l] * alpha_c + beta;
output[l] = saturate_cast<DType>(val);
}
});
}
// Sample a contrast factor uniformly from [min_factor, max_factor] and
// delegate to AdjustContrastImpl.
inline void RandomContrast(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomEnhanceParam& param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  std::uniform_real_distribution<float> factor_dist(param.min_factor, param.max_factor);
  const float alpha_c = factor_dist(prnd->GetRndEngine());
  AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs);
}
// Blend each pixel with its own gray (luma) value:
// out = gray * (1 - alpha_s) + in * alpha_s.  alpha_s == 0 desaturates fully,
// alpha_s == 1 is the identity.  Grayscale input is passed through unchanged.
inline void AdjustSaturationImpl(const float& alpha_s,
                                 const OpContext& ctx,
                                 const std::vector<TBlob>& inputs,
                                 const std::vector<OpReqType>& req,
                                 const std::vector<TBlob>& outputs) {
  // ITU-R BT.601 luma coefficients for R, G, B.
  static const float coef[] = {0.299f, 0.587f, 0.114f};
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int nchannels = inputs[0].shape_[2];
  float alpha_o = 1.f - alpha_s;
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    if (nchannels == 1) {
      // Saturation is undefined for grayscale: copy through.
      for (int l = 0; l < length; ++l)
        output[l] = input[l];
      return;
    }
    for (int l = 0; l < length; ++l) {
      float gray = 0.f;
      for (int c = 0; c < 3; ++c) {
        // FIX: accumulate the weighted channels ('+='); the previous '='
        // overwrote the sum, so gray held only the blue contribution.
        gray += input[l * 3 + c] * coef[c];
      }
      gray *= alpha_o;
      for (int c = 0; c < 3; ++c) {
        float val = gray + input[l * 3 + c] * alpha_s;
        output[l * 3 + c] = saturate_cast<DType>(val);
      }
    }
  });
}
// Sample a saturation factor uniformly from [min_factor, max_factor] and
// delegate to AdjustSaturationImpl.
inline void RandomSaturation(const nnvm::NodeAttrs& attrs,
                             const OpContext& ctx,
                             const std::vector<TBlob>& inputs,
                             const std::vector<OpReqType>& req,
                             const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomEnhanceParam& param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  std::uniform_real_distribution<float> factor_dist(param.min_factor, param.max_factor);
  const float alpha_s = factor_dist(prnd->GetRndEngine());
  AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs);
}
// Convert one RGB pixel (each component in [0, 255]) to HLS:
// hue in [0, 360), lightness and saturation in [0, 1].  Achromatic pixels
// (max - min below float epsilon) get hue = saturation = 0.
inline void RGB2HLSConvert(const float& src_r,
                           const float& src_g,
                           const float& src_b,
                           float* dst_h,
                           float* dst_l,
                           float* dst_s) {
  // Normalize to [0, 1].
  const float r = src_r / 255.f;
  const float g = src_g / 255.f;
  const float b = src_b / 255.f;
  const float vmax = std::fmax(std::fmax(r, g), b);
  const float vmin = std::fmin(std::fmin(r, g), b);
  const float delta = vmax - vmin;
  const float lightness = (vmax + vmin) * 0.5f;
  float hue = 0.f;
  float saturation = 0.f;
  if (delta > std::numeric_limits<float>::epsilon()) {
    // Saturation formula depends on which half of the lightness range we are in.
    if (lightness < 0.5f) {
      saturation = delta / (vmax + vmin);
    } else {
      saturation = delta / (2.0f - vmax - vmin);
    }
    // Hue sector is chosen by the dominant channel; 60 degrees per unit.
    const float scale = 60.f / delta;
    if (vmax == r) {
      hue = (g - b) * scale;
    } else if (vmax == g) {
      hue = (b - r) * scale + 120.f;
    } else {
      hue = (r - g) * scale + 240.f;
    }
    if (hue < 0.f) {
      hue += 360.f;
    }
  }
  *dst_h = hue;
  *dst_l = lightness;
  *dst_s = saturation;
}
// Convert one HLS pixel (hue in degrees, lightness/saturation in [0, 1])
// back to RGB with each component scaled to [0, 255].  Inverse of
// RGB2HLSConvert above.
inline void HLS2RGBConvert(const float& src_h,
const float& src_l,
const float& src_s,
float* dst_r,
float* dst_g,
float* dst_b) {
// For each of the 6 hue sectors, indices into tab[] giving the B, G, R values.
static const int c_HlsSectorData[6][3] = {
{1, 3, 0}, {1, 0, 2}, {3, 0, 1}, {0, 2, 1}, {0, 1, 3}, {2, 1, 0}};
float h = src_h, l = src_l, s = src_s;
// Zero saturation: all channels equal the lightness (achromatic).
float b = l, g = l, r = l;
if (s != 0) {
// p2/p1 are the standard HSL chroma bounds for this lightness.
float p2 = (l <= 0.5f) * l * (1 + s);
p2 += (l > 0.5f) * (l + s - l * s);
float p1 = 2 * l - p2;
// Convert hue from degrees to sector units (one sector = 60 degrees),
// then wrap into [0, 6).
h *= 1.f / 60.f;
if (h < 0) {
do {
h += 6;
} while (h < 0);
}
if (h >= 6) {  // h + 6 >= 6 holds true for some h < 0
do {
h -= 6;
} while (h >= 6);
}
int sector = static_cast<int>(h);
// Fractional position within the sector.
h -= sector;
// Candidate channel values; the sector table picks which goes where.
float tab[4];
tab[0] = p2;
tab[1] = p1;
tab[2] = p1 + (p2 - p1) * (1 - h);
tab[3] = p1 + (p2 - p1) * h;
b = tab[c_HlsSectorData[sector][0]];
g = tab[c_HlsSectorData[sector][1]];
r = tab[c_HlsSectorData[sector][2]];
}
// Scale back to [0, 255].
*dst_b = b * 255.f;
*dst_g = g * 255.f;
*dst_r = r * 255.f;
}
inline void AdjustHueImpl(float alpha,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
int length = inputs[0].shape_[0] * inputs[0].shape_[1];
if (inputs[0].shape_[2] == 1)
return;
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
DType* input = inputs[0].dptr<DType>();
DType* output = outputs[0].dptr<DType>();
for (int i = 0; i < length; ++i) {
float h, l, s;
float r = static_cast<float>(*(input++));
float g = static_cast<float>(*(input++));
float b = static_cast<float>(*(input++));
RGB2HLSConvert(r, g, b, &h, &l, &s);
h += alpha * 360.f;
HLS2RGBConvert(h, l, s, &r, &g, &b);
*(output++) = saturate_cast<DType>(r);
*(output++) = saturate_cast<DType>(g);
*(output++) = saturate_cast<DType>(b);
}
});
}
// Sample a hue rotation uniformly from [min_factor, max_factor] and delegate
// to AdjustHueImpl.
inline void RandomHue(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomEnhanceParam& param = nnvm::get<RandomEnhanceParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, real_t>(s);
  std::uniform_real_distribution<float> factor_dist(param.min_factor, param.max_factor);
  const float alpha = factor_dist(prnd->GetRndEngine());
  AdjustHueImpl(alpha, ctx, inputs, req, outputs);
}
// Jitter magnitudes for RandomColorJitter; each field is the half-width of
// the uniform sampling interval for the corresponding transform (0 disables
// that transform).
struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> {
float brightness;  // brightness factor jitter half-width
float contrast;    // contrast factor jitter half-width
float saturation;  // saturation factor jitter half-width
float hue;         // hue rotation jitter half-width (fraction of 360 degrees)
DMLC_DECLARE_PARAMETER(RandomColorJitterParam) {
DMLC_DECLARE_FIELD(brightness).describe("How much to jitter brightness.");
DMLC_DECLARE_FIELD(contrast).describe("How much to jitter contrast.");
DMLC_DECLARE_FIELD(saturation).describe("How much to jitter saturation.");
DMLC_DECLARE_FIELD(hue).describe("How much to jitter hue.");
}
};
// Apply brightness, contrast, saturation and hue jitter in a random order.
// Each enabled transform draws its factor uniformly from
// [1 - magnitude, 1 + magnitude] (hue: [-hue, +hue]).  The first transform
// reads from inputs; once `flag` is set, later transforms read from outputs
// so the adjustments compose in place.
inline void RandomColorJitter(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
const RandomColorJitterParam& param = nnvm::get<RandomColorJitterParam>(attrs.parsed);
Stream<cpu>* s = ctx.get_stream<cpu>();
Random<cpu>* prnd = ctx.requested[0].get_random<cpu, real_t>(s);
// Randomize the order in which the four transforms are applied.
int order[4] = {0, 1, 2, 3};
std::shuffle(order, order + 4, prnd->GetRndEngine());
// flag == true once at least one transform has written to outputs.
bool flag = false;
for (int i = 0; i < 4; ++i) {
switch (order[i]) {
case 0:
if (param.brightness > 0) {
float alpha_b = 1.0 + std::uniform_real_distribution<float>(
-param.brightness, param.brightness)(prnd->GetRndEngine());
AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 1:
if (param.contrast > 0) {
float alpha_c = 1.0 + std::uniform_real_distribution<float>(
-param.contrast, param.contrast)(prnd->GetRndEngine());
AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 2:
if (param.saturation > 0) {
float alpha_s = 1.f + std::uniform_real_distribution<float>(
-param.saturation, param.saturation)(prnd->GetRndEngine());
AdjustSaturationImpl(alpha_s, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
case 3:
if (param.hue > 0) {
float alpha_h =
std::uniform_real_distribution<float>(-param.hue, param.hue)(prnd->GetRndEngine());
AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs);
flag = true;
}
break;
}
}
}
// Parameter for the deterministic AdjustLighting op: one PCA-noise alpha per
// RGB channel.
struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> {
mxnet::Tuple<float> alpha;  // (alpha_r, alpha_g, alpha_b)
DMLC_DECLARE_PARAMETER(AdjustLightingParam) {
DMLC_DECLARE_FIELD(alpha).describe("The lighting alphas for the R, G, B channels.");
}
};
// Parameter for the RandomLighting op: standard deviation of the Gaussian
// from which the per-channel alphas are drawn.
struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> {
float alpha_std;  // std-dev of the lighting noise (default 0.05)
DMLC_DECLARE_PARAMETER(RandomLightingParam) {
DMLC_DECLARE_FIELD(alpha_std).set_default(0.05).describe("Level of the lighting noise.");
}
};
// Add AlexNet-style PCA lighting noise: a per-channel constant offset
// pca = eig * alpha added to every pixel.  Grayscale input passes through.
inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs) {
  // Eigenvectors scaled by eigenvalues — presumably from an ImageNet RGB
  // covariance decomposition (hard-coded upstream); TODO confirm provenance.
  static const float eig[3][3] = {{55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009},
                                  {55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140},
                                  {55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203}};
  int length = inputs[0].shape_[0] * inputs[0].shape_[1];
  int channels = inputs[0].shape_[2];
  if (channels == 1) {
    // PCA lighting is RGB-only: copy grayscale input through instead of
    // returning with the output buffer unwritten (fixes an uninitialized
    // output when the op is not in place; matches AdjustSaturationImpl).
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      if (outputs[0].dptr_ != inputs[0].dptr_) {
        std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType));
      }
    });
    return;
  }
  // Constant per-channel offsets: pca = eig * alpha.
  float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2];
  float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2];
  float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2];
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    DType* output = outputs[0].dptr<DType>();
    DType* input = inputs[0].dptr<DType>();
    for (int i = 0; i < length; i++) {
      int base_ind = 3 * i;
      float in_r = static_cast<float>(input[base_ind]);
      float in_g = static_cast<float>(input[base_ind + 1]);
      float in_b = static_cast<float>(input[base_ind + 2]);
      output[base_ind] = saturate_cast<DType>(in_r + pca_r);
      output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g);
      output[base_ind + 2] = saturate_cast<DType>(in_b + pca_b);
    }
  });
}
// Deterministic lighting adjustment: apply the user-supplied per-channel
// alphas via AdjustLightingImpl.
inline void AdjustLighting(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
const AdjustLightingParam& param = nnvm::get<AdjustLightingParam>(attrs.parsed);
AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs);
}
// Randomized lighting adjustment: draw one alpha per RGB channel from
// N(0, alpha_std) and apply PCA lighting noise.
inline void RandomLighting(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const RandomLightingParam& param = nnvm::get<RandomLightingParam>(attrs.parsed);
  Stream<cpu>* s = ctx.get_stream<cpu>();
  Random<cpu>* prnd = ctx.requested[0].get_random<cpu, float>(s);
  std::normal_distribution<float> dist(0, param.alpha_std);
  // One independent draw per channel, in R, G, B order.
  float alphas[3];
  for (float& a : alphas) {
    a = dist(prnd->GetRndEngine());
  }
  AdjustLightingImpl({alphas[0], alphas[1], alphas[2]}, ctx, inputs, req, outputs);
}
// Registers a 1-in/1-out image augmentation op that may run in place and is
// transparent to gradients (identity backward via "_copy").
#define MXNET_REGISTER_IMAGE_AUG_OP(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs) { \
return std::vector<std::pair<int, int>>{{0, 0}}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ImageShape) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_copy"}) \
.add_argument("data", "NDArray-or-Symbol", "The input.")
// Same as above, plus a random-number resource request for the Random* ops.
#define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \
MXNET_REGISTER_IMAGE_AUG_OP(name).set_attr<FResourceRequest>( \
"FResourceRequest", [](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \
})
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
|
vla-5.c | // { dg-do compile }
/* Compile-only regression test: a variable-length array must be accepted in
   an OpenMP lastprivate clause on a "parallel sections" construct.  */
void foo(int n, int i)
{
int A[n];
#pragma omp parallel sections lastprivate(A)
{
A[i] = 1;
}
}
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
/*
  One color-channel interval (class extent) on the 0..255 histogram axis:
  [left, right] bounds, the scan position used by DefineRegion(), and
  `center`, which Classify() first uses as a running sum of member pixel
  values and then divides by the member count to get the mean.
*/
typedef struct _ExtentPacket
{
double
center;
ssize_t
index,
left,
right;
} ExtentPacket;
/*
  A candidate segmentation class: node of a singly-linked list holding the
  per-channel extents, the number of member pixels (count), and the colormap
  index (id) assigned once small clusters have been pruned.
*/
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
/*
  Node of the interval tree built from histogram zero crossings across
  scale-space: tau is the filter scale, [left, right] the histogram interval,
  and the stability fields rank how persistent the interval is across scales
  (computed elsewhere in this file -- see OptimalTau).
*/
typedef struct _IntervalTree
{
double
tau;
ssize_t
left,
right;
double
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
/*
  A histogram smoothed at scale tau, together with the sign pattern of its
  second derivative (crossings), one entry per intensity level 0..255 --
  the "fingerprint" described in the file header.
*/
typedef struct _ZeroCrossing
{
double
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,const double weighting_exponent,
const MagickBooleanType verbose,ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
#define ThrowClassifyException(severity,tag,label) \
{\
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \
{ \
next_cluster=cluster->next; \
cluster=(Cluster *) RelinquishMagickMemory(cluster); \
} \
if (squares != (double *) NULL) \
{ \
squares-=255; \
free_squares=squares; \
free_squares=(double *) RelinquishMagickMemory(free_squares); \
} \
ThrowBinaryException(severity,tag,label); \
}
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
double
*free_squares;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
MagickStatusType
status;
ssize_t
i;
double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
squares=(double *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireQuantumMemory(1,
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
(void) memset(cluster,0,sizeof(*cluster));
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
(void) memset(cluster,0,sizeof(*cluster));
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,p));
pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,p));
pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,p));
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if ((pixel.red >= (double) (cluster->red.left-SafeMargin)) &&
(pixel.red <= (double) (cluster->red.right+SafeMargin)) &&
(pixel.green >= (double) (cluster->green.left-SafeMargin)) &&
(pixel.green <= (double) (cluster->green.right+SafeMargin)) &&
(pixel.blue >= (double) (cluster->blue.left-SafeMargin)) &&
(pixel.blue <= (double) (cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=pixel.red;
cluster->green.center+=pixel.green;
cluster->blue.center+=pixel.blue;
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowClassifyException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse-grain classification.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*c;
const PixelInfo
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
SetPixelIndex(image,(Quantum) 0,q);
pixel.red=(double) ScaleQuantumToChar(GetPixelRed(image,q));
pixel.green=(double) ScaleQuantumToChar(GetPixelGreen(image,q));
pixel.blue=(double) ScaleQuantumToChar(GetPixelBlue(image,q));
for (c=head; c != (Cluster *) NULL; c=c->next)
{
if ((pixel.red >= (double) (c->red.left-SafeMargin)) &&
(pixel.red <= (double) (c->red.right+SafeMargin)) &&
(pixel.green >= (double) (c->green.left-SafeMargin)) &&
(pixel.green <= (double) (c->green.right+SafeMargin)) &&
(pixel.blue >= (double) (c->blue.left-SafeMargin)) &&
(pixel.blue <= (double) (c->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) c->id,q);
break;
}
}
if (c == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=
squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+
squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+
squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=
squares[(ssize_t) (pixel.red-ScaleQuantumToChar(p->red))]+
squares[(ssize_t) (pixel.green-ScaleQuantumToChar(p->green))]+
squares[(ssize_t) (pixel.blue-ScaleQuantumToChar(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings.

    Work from the coarsest scale down to the finest.  Each crossing at
    scale i is either moved to a nearby column that preserves an even
    number of finer-scale (i+1) crossings between intervals, or dropped.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      /* nearest finer-scale crossing to the left of column j */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      /* nearest finer-scale crossing to the right of column j */
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;  /* defensive; the loop above cannot leave k negative */
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);  /* -1 means no acceptable destination column yet */
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /* relocate the crossing to the chosen column, or drop it */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represents the extent
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Establish default extent values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Scan forward from the current index to the first positive extrema
    entry; that bin is the left edge (maxima) of the region.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Continue to the first negative entry (minima); the region's right
    edge is the bin just before it.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  ssize_t
    x;

  const ssize_t
    last = 255;

  /*
    Interior bins: central differencing.
  */
  for (x=1; x < last; x++)
    derivative[x]=0.5*(histogram[x+1]-histogram[x-1]);
  /*
    Endpoints: second order polynomial interpolation.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[last]=(0.5*histogram[last-2]-2.0*histogram[last-1]+
    1.5*histogram[last]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  const Quantum
    *p;

  ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* was sizeof(**histogram); extrema[] holds shorts (cf. SegmentImage) */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Start at index i (not i-1) so a half-allocated pair is released
          too; RelinquishMagickMemory() is NULL-safe.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and mark each channel's peak/valley extrema.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) peak-region combination.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Release the partially built cluster list and the histogram
              buffers before failing (these were previously leaked).
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireQuantumMemory(1,sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Release the histogram buffers before failing (previously
            leaked).
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster; accumulate per-channel sums so the
    cluster centers can be computed below.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        b,
        g,
        r;

      r=(double) ScaleQuantumToChar(GetPixelRed(image,p));
      g=(double) ScaleQuantumToChar(GetPixelGreen(image,p));
      b=(double) ScaleQuantumToChar(GetPixelBlue(image,p));
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if ((r >= (double) (cluster->red.left-SafeMargin)) &&
            (r <= (double) (cluster->red.right+SafeMargin)) &&
            (g >= (double) (cluster->green.left-SafeMargin)) &&
            (g <= (double) (cluster->green.right+SafeMargin)) &&
            (b >= (double) (cluster->blue.left-SafeMargin)) &&
            (b <= (double) (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=r;
            cluster->green.center+=g;
            cluster->blue.center+=b;
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.  Note:
    count restarts at zero here; it numbers the retained clusters, so the
    acceptance threshold ramps up as clusters are kept.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated channel sums to means.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Choose the smallest remaining cluster as the object and the largest
    as the background.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        Per channel, the dynamic threshold is the midpoint between the
        background and object cluster centers.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    bin,
    y;

  /*
    Zero all 256 bins of each channel histogram.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Tally one entry per pixel for the red, green, and blue channels,
    scaled to 8-bit bin indices.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
% IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless node of the tree to list, bumping
    *number_nodes for each.  Visit order -- node, then the sibling
    subtree, then the child subtree -- is preserved for callers.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
  /*
    Set node->mean_stability to the average stability of the node's
    immediate children (0 when it has none), then recurse over the
    sibling and child subtrees.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      IntervalTree
        *child;

      double
        total;

      ssize_t
        children;

      total=0.0;
      children=0;
      for (child=node->child; child != (IntervalTree *) NULL; child=child->sibling)
      {
        total+=child->stability;
        children++;
      }
      node->mean_stability=total/(double) children;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the tau gap between it and its first child;
    childless nodes get 0.  Applied to the whole tree recursively.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /* i starts at -1 so the first pass splits the root using entry 0 */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      /* every crossing strictly inside [head->left,head->right] closes
         one child interval and opens the next */
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                /* first child hangs off head; later ones are siblings */
                node->child=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /* close the trailing interval, if any crossing was found above */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireQuantumMemory(1,
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect "active" nodes: a node whose stability is at least its
    children's mean stability is recorded and its subtree is pruned;
    otherwise the search descends into its children.  Siblings are
    always visited.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
      ActiveNodes(list,number_nodes,node->sibling);
      return;
    }
  ActiveNodes(list,number_nodes,node->sibling);
  ActiveNodes(list,number_nodes,node->child);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Release an interval tree: siblings and children are freed before
    the node itself.
  */
  if (node != (IntervalTree *) NULL)
    {
      FreeNodes(node->sibling);
      FreeNodes(node->child);
      node=(IntervalTree *) RelinquishMagickMemory(node);
    }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one for the
    original (unsmoothed) histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);  /* -1 marks an unused slot */
  /*
    Initialize zero crossing list: at each scale tau, smooth the
    histogram and record where its second derivative changes sign.
  */
  derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(double *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *) RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    /* mirror the first marked crossing into column 0 */
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    /* mirror the last marked crossing into column 255 */
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: Stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.  Exact float equality is
      safe here: node->tau was copied verbatim from zero_crossing[].tau
      in InitializeIntervalTree.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          /* peak interval: locate the maximum histogram value */
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        /* valley interval: locate the minimum histogram value */
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Mark the whole interval with the peak/valley position; index 0 is
      stored as 256, presumably so an extrema value of 0 can mean
      "unset" -- confirm against DefineRegion's consumers.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau*=PerceptibleReciprocal((double) number_nodes);
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    weight;

  ssize_t
    u,
    x;

  /*
    Convolve the histogram with a Gaussian whose width is governed by
    tau; the result lands in scale_histogram.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  /*
    Tabulate the kernel; stop once the weights drop below epsilon (the
    remaining entries stay zero).
  */
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) beta*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    weight=0.0;
    for (u=0; u <= 255; u++)
      weight+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*weight;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* release the pairs acquired so far, then throw */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram: build per-channel histograms in the requested
    colorspace, then mark each channel's peak/valley extrema at the
    optimal smoothing scale.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,smooth_threshold == 0.0 ?
    1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  /* restore the caller's colorspace before returning */
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  ssize_t
    i;

  ssize_t
    sign;

  /*
    Clamp small-magnitude samples to zero so noise does not produce
    spurious crossings.  Note: second_derivative is modified in place.
  */
  for (i=0; i <= 255; i++)
  {
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  }
  /*
    Mark crossings.  sign tracks the sign of the most recent nonzero
    sample: a bin is marked when its sign matches the tracked sign
    (the marking scheme the callers rely on).
  */
  sign=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] > 0.0)
      {
        if (sign < 0)
          crossings[i]=1;
        sign=(-1);
      }
    else
      if (second_derivative[i] < 0.0)
        {
          if (sign > 0)
            crossings[i]=(-1);
          sign=1;
        }
  }
}
|
GB_unaryop__ainv_bool_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_int64
// op(A') function: GB_tran__ainv_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op ((bool) Ax [p]) for all p, in parallel.  Per the macros above,
// the cast is "bool z = (bool) x" and the op is "z = x" (identity once cast
// to bool).  Cx and Ax must not overlap (restrict).
GrB_Info GB_unop__ainv_bool_int64
(
    bool *restrict Cx,              // output array, already allocated
    const int64_t *restrict Ax,     // input array, not modified
    int64_t anz,                    // number of entries in Ax and Cx
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;         // Cx [p] = op ((bool) Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op ((bool) A'): transpose, typecast int64_t to bool, and apply the
// unary operator.  All the work is done by the GB_unaryop_transpose.c
// template, which expands using the GB_* macros defined above; this body
// must stay in sync with that template and should not be edited by hand
// (auto-generated file).
GrB_Info GB_tran__ainv_bool_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice workspace; presumably one
                                    // array per slice — see the template
    GBI_single_iterator Iter,       // iterator over the vectors of A
    const int64_t *restrict A_slice,// defines how A is partitioned
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
/*
  Default colors, tile geometries, and progress-monitor tag strings shared
  by the image methods in this file.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

const double
  DefaultResolution = 72.0;  /* presumably dots-per-inch (see
                                PSDensityGeometry) — confirm */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure: MIFF defaults, sRGB colorspace, and the
    standard sRGB chromaticity primaries and D65 white point.
  */
  (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
    exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image->transparent_color,exception);
  GetTimerInfo(&image->timer);
  image->cache=AcquirePixelCache(0);
  image->channel_mask=DefaultChannels;
  image->channel_map=AcquirePixelChannelMap();
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AcquireSemaphoreInfo();
  image->signature=MagickCoreSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      /* x/y of -size are consumed as the scanline offset, not an extract. */
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;  /* "72" means "72x72" */
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->matte_color=image_info->matte_color;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  /*
    Set all global options that map to per-image settings.
  */
  (void) SyncImageSettings(image_info,image,exception);
  /*
    Global options that are only set for new images.  The "delay" option
    supports ">" (cap the delay at rho) and "<" (raise the delay to at
    least rho); a sigma value sets ticks-per-second.
  */
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              Bug fix: raise the delay to the requested minimum.  The
              previous code assigned ticks_per_second from sigma here,
              a copy-paste error that left the delay unchanged.
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate an ImageInfo and initialize it to default values; allocation
    failure is fatal.  Caller owns the result (DestroyImageInfo).
  */
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
  Image
    *next;

  /*
    Allocate the next image in the sequence and link it after "image".
    On allocation failure image->next is left NULL and we simply return.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info,exception);
  next=GetNextImageInList(image);
  if (next == (Image *) NULL)
    return;
  /* Inherit the filename (image_info's takes precedence when provided). */
  (void) CopyMagickString(next->filename,image->filename,MagickPathExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(next->filename,image_info->filename,
      MagickPathExtent);
  /* Share the blob with the predecessor instead of the fresh one. */
  DestroyBlob(next);
  next->blob=ReferenceBlob(image->blob);
  next->endian=image->endian;
  next->scene=image->scene+1;
  next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  alpha_trait=images->alpha_trait;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* Track the deepest depth and whether any frame carries alpha. */
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Top-to-bottom: width is the maximum, heights accumulate. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* Left-to-right: widths accumulate, height is the maximum. */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
    {
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  /* Mixed colorspaces are normalized to sRGB before compositing. */
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace,exception);
  append_image->depth=depth;
  append_image->alpha_trait=alpha_trait;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /* Justify this frame within its slot according to its gravity. */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(status) \
      magick_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;  /* another thread failed; skip remaining rows */
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(next,&pixel);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        if (GetPixelWriteMask(next,p) == 0)
          {
            /* Write-masked pixel: emit the background color instead.
               (NB: "Backgound" is the accessor's actual spelling.) */
            SetPixelBackgoundColor(append_image,q);
            p+=GetPixelChannels(next);
            q+=GetPixelChannels(append_image);
            continue;
          }
        GetPixelInfoPixel(next,p,&pixel);
        SetPixelViaPixelInfo(append_image,&pixel,q);
        p+=GetPixelChannels(next);
        q+=GetPixelChannels(append_image);
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* Advance the insertion point for the next frame. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception_info;

  ExceptionType
    severity;

  /*
    Gather any queued exceptions for the sequence, report them via
    CatchException(), and return the most severe one observed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception_info=AcquireExceptionInfo();
  CatchException(exception_info);
  severity=exception_info->severity;
  exception_info=DestroyExceptionInfo(exception_info);
  return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Convenience wrapper: apply the first embedded clipping path ("#1"),
   clipping inside the path. */
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
  return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the clipping path stored as an "8BIM:1999,2998" image property.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property,exception);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the clipping path into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,
    MagickPathExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,
    MagickPathExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask,exception);
      if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
        {
          /*
            Bug fix: release the clip mask on failure; the previous code
            returned without destroying it, leaking the image.
          */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  /* "inside" clips inside the path, so the mask is negated. */
  if (inside != MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse,exception);
  (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageMask(image,WritePixelMask,clip_mask,exception);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  double
    scale;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
  if (clone_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  /* Copy scalar fields and deep-clone the attached metadata. */
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->read_mask=image->read_mask;
  clone_image->write_mask=image->write_mask;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  /* Detached clones get their own blob and are unlinked from the list. */
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /* Exact copy: share the pixel cache by reference and return. */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    Resized clone: scale the page and tile offsets proportionally; the
    pixel data itself is left undefined for the caller to initialize.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);  /* NULL source: return default-initialized info */
  /* Scalar settings are copied; strings are deep-cloned via CloneString. */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  (void) CloneString(&clone_info->size,image_info->size);
  (void) CloneString(&clone_info->extract,image_info->extract);
  (void) CloneString(&clone_info->scenes,image_info->scenes);
  (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
  (void) CloneString(&clone_info->server_name,image_info->server_name);
  (void) CloneString(&clone_info->font,image_info->font);
  (void) CloneString(&clone_info->texture,image_info->texture);
  (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->matte_color=image_info->matte_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /* The pixel cache is shared by reference, not copied. */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->custom_stream=image_info->custom_stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    The target rectangle must lie entirely within the destination image.
    NOTE(review): the source rectangle is not validated against
    source_image's bounds -- out-of-range reads presumably resolve through
    the virtual pixel cache; confirm.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels present in both images and flagged for update
        in the destination.  NOTE(review): p[i] indexes the source pixel
        with the destination's channel index; this assumes both images
        share the same channel layout -- verify.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            ((traits & UpdatePixelTrait) == 0) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImage)
#endif
        proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image: decrement the reference count under the image
    semaphore; only the last reference actually destroys the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image: release pixels, channel map, strings, profiles,
    properties, artifacts, blob, semaphore, then the structure itself.
  */
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /*
    Invalidate the signature to catch use-after-free of this Image.
  */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Free every dynamically allocated ImageInfo member, then the structure
    itself; always returns NULL so callers can clear their pointer.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /*
    Invalidate the signature to catch use-after-free of this ImageInfo.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream.  It checks if the
% blob of the specified image is referenced by other images.  If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach this image from any blob shared with other images; the real
    work is delegated to DisassociateBlob().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize_option;

  ExceptionInfo
    *sans_exception;

  /*
    Zero the structure, then establish the documented defaults for the
    file and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment override.
  */
  synchronize_option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize_option != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize_option);
      synchronize_option=DestroyString(synchronize_option);
    }
  /*
    Resolve the default background, border, matte, and transparent colors;
    lookup errors are deliberately discarded.
  */
  sans_exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,sans_exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,sans_exception);
  (void) QueryColorCompliance(MatteColor,AllCompliance,
    &image_info->matte_color,sans_exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor: returns the FILE member; no validation performed. */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask: build a grayscale image whose intensity is the
    requested (read or write) mask value of each pixel.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mask_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  /*
    NOTE(review): the clone's read mask is cleared here, presumably so the
    mask channel does not gate pixel access while we write it -- confirm.
  */
  mask_image->read_mask=MagickFalse;
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          /* Any other type (e.g. ReadPixelMask) yields the read mask. */
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  /* On any pixel-access failure the partially built mask is discarded. */
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  /*
    Snapshot the image reference count under the protection of the image
    semaphore.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Report the virtual pixel method by delegating to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
%    o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  /*
    Expand %-escapes in 'format' into 'filename': the first %d/%o/%x gets
    the numeric 'value' substituted, %[filename:...] is replaced with the
    matching property/artifact/option, and %% collapses to a literal %.
    The edits below splice text in place by temporarily NUL-terminating
    segments of 'filename'.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    /* A doubled %% is a literal percent; skip past it. */
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    /*
      Skip an optional zero-padded width (e.g. %03d); the parsed number is
      discarded -- strtol only advances q past the digits.
    */
    if (*q == '0')
      {
        ssize_t
          foo;

        foo=(ssize_t) strtol(q,&q,10);
        (void) foo;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: terminate just after the conversion
          character, format in place, restore the saved character, then
          re-append the remainder of the filename.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t)
          (MagickPathExtent-(p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        /* FUTURE: Compare update with code from InterpretImageProperties()
           Note that a 'filename:' property should not need depth recursion.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /*
          Extract the bracketed pattern, honoring nested [ ] pairs via a
          depth counter.
        */
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only 'filename:' properties are interpolated here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /*
          Look the pattern up as an image property, then an artifact, then
          an image-info option -- first hit wins.
        */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        /*
          Splice the option value over the %[...] escape; 'length' tracks
          the cumulative size shift from earlier substitutions.
          NOTE(review): verify the (p-format-length) offset arithmetic
          stays within bounds after multiple substitutions.
        */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),option,(size_t)
          (MagickPathExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse any remaining %% pairs to a single literal %. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
        canonical=MagickTrue;
      }
  /* No escape matched: the filename is the format string verbatim. */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support a Quantum cannot hold fractional or out-of-range
    values, so the answer is always false.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status remains MagickTrue while every component is an integral value
    within [0,QuantumRange]; the return statement inverts it.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Pixels excluded by the write mask are not inspected. */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /*
          A component is "HDR" when negative, above QuantumRange, or not
          exactly representable as an integer.
        */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      /* An early break above means an HDR component was found. */
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    /*
      NOTE(review): the x-loop never breaks early, so this check appears
      unreachable -- defensive only.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  const Image
    *next;

  /*
    Walk the image list and verify that every frame carries a valid
    MagickCore signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been
% altered since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent];

  const Image
    *p;

  /*
    A sequence is tainted when any frame is flagged as modified, or its
    magick or filename differs from that of the first frame.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if ((p->taint != MagickFalse) ||
        (LocaleCompare(p->magick,magick) != 0) ||
        (LocaleCompare(p->filename,filename) != 0))
      return(MagickTrue);
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure *image has a single reference, cloning it when shared so the
    caller may safely modify it.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  /*
    Bug fix: if the clone fails, leave *image and its reference count
    untouched and report failure; the original code decremented the count
    and replaced *image with NULL, losing the caller's image.
  */
  if (clone_image == (Image *) NULL)
    return(MagickFalse);
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Create a width x height canvas filled with the background color,
    inheriting colorspace, alpha trait, fuzz, and depth from it.
  */
  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  /*
    NOTE(review): AcquireImage's result is used unchecked; assumed to
    abort rather than return NULL on allocation failure -- confirm.
  */
  image=AcquireImage(image_info,exception);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* On any pixel-access failure the partial canvas is discarded. */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Increment the reference count under the image semaphore and return the
    same image pointer; pair each call with DestroyImage().
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Reset the page canvas size and position from a geometry specification.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A width with no height implies a square canvas. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /*
        NOTE(review): AspectValue appears to mark relative offsets (added
        to the current position) rather than absolute placement -- confirm
        against ParseAbsoluteGeometry's flag semantics.
      */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          /* Grow a zero-width canvas to cover the offset image. */
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o Alpha: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set every pixel's alpha channel to the given level, enabling the
    blend alpha trait on the image; pixels excluded by the write mask are
    left unchanged.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelWriteMask(image,q) != 0)
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImage method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    Fill the entire canvas with the image's background color.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    A translucent background on an image with no alpha channel requires
    activating the alpha channel first.
  */
  if ((image->background_color.alpha != OpaqueAlpha) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /*
    Thin wrapper: delegates to SetPixelChannelMask() and returns its
    result (the previous mask, per that function's contract -- confirm).
  */
  return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o color: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /*
    Adopt the color's colorspace, alpha trait, fuzz and depth on the image so
    the pixel write below is interpreted consistently.
  */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure in any thread makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  /*
    Record the new storage class (DirectClass or PseudoClass) and resync the
    pixel cache so its layout matches the class.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  /*
    Set the image size (columns & rows).  Validate everything before mutating
    the image so a failing call leaves the previous dimensions intact
    (the original code set columns/rows before the depth check, leaving the
    image inconsistent on failure).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  if (image->depth > (8*sizeof(MagickSizeType)))
    ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
%  precedence over the suffix.  Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          /* Not a scene list; treat a plain geometry as an extract region. */
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers and ranges
            (e.g. "2,4-7"), tracking the overall minimum scene and maximum
            scene seen.  Note: the loop condition tests *q (updated by
            strtol) while p advances through the string.
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* Convert the max scene into a count relative to the first scene. */
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /*
    For compressed suffixes (.gz, .Z, .svgz, .wmz) look at the extension
    underneath the compression suffix (e.g. image.tif.gz -> tif).
  */
  if (*component != '\0')
    if ((LocaleCompare(component,"gz") == 0) ||
        (LocaleCompare(component,"Z") == 0) ||
        (LocaleCompare(component,"svgz") == 0) ||
        (LocaleCompare(component,"wmz") == 0))
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  /* Same treatment for bzip2-compressed files (.bz2). */
  if (*component != '\0')
    if (LocaleCompare(component,"bz2") == 0)
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*component != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      /*
        Pseudo-formats that denote an action (print, show, scan, ...) rather
        than a decodable file format; these make the format "explicit".
      */
      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      /* No format prefix; fall back to the format derived above. */
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      /* Honor the prefix only for known coders/delegates without conflicts
         (e.g. a prefix that collides with a Windows drive letter). */
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file so the header bytes
            can be read and then rewound.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireMagickMemory(magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) ResetMagickMemory(magick,0,magick_size);
      /* Peek at the header bytes, then restore the blob position. */
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Attach an in-memory blob (and its length) to the image info; the caller
    retains ownership of the blob memory.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->blob=(void *) blob;
  image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
  CustomStreamInfo *custom_stream)
{
  /*
    Install caller-provided custom stream handlers on the image info; the
    caller retains ownership of the CustomStreamInfo.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Attach an already-open stdio stream to the image info; pass NULL to
    detach.  The caller retains ownership of the FILE.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      /*
        A NULL mask clears the requested mask flag and resyncs the cache.
      */
      switch (type)
      {
        case WritePixelMask: image->write_mask=MagickFalse; break;
        default: image->read_mask=MagickFalse; break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  /* Enable the mask channel before copying the mask pixels into it. */
  switch (type)
  {
    case WritePixelMask: image->write_mask=MagickTrue; break;
    default: image->read_mask=MagickTrue; break;
  }
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(mask,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;

      /* Pixels outside the mask's extent contribute zero intensity. */
      intensity=0;
      if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
        intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(intensity),q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,ClampToQuantum(intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o geometry: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
  const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask as defined by the region.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (region == (const RectangleInfo *) NULL)
    {
      /*
        A NULL region clears the requested mask flag and resyncs the cache.
      */
      switch (type)
      {
        case WritePixelMask: image->write_mask=MagickFalse; break;
        default: image->read_mask=MagickFalse; break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case WritePixelMask: image->write_mask=MagickTrue; break;
    default: image->read_mask=MagickTrue; break;
  }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        pixel;

      /* Inside the rectangle the mask is fully on; elsewhere it is zero. */
      pixel=0;
      if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
          ((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
        pixel=QuantumRange;
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,pixel,q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,pixel,q);
          break;
        }
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Delegate to the pixel cache; returns the previous virtual-pixel method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SmushXGap() measures how far the current image can be slid left toward its
  predecessor: for each row it counts trailing transparent pixels on the left
  image plus leading transparent pixels on the right image, and keeps the
  minimum over all rows.  Returns that gap minus the requested offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* The first image in the list has no left neighbor: no gap to close. */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan the left image from its right edge for transparent pixels. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Scan the right image from its left edge for transparent pixels. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /*
    NOTE(review): the outer row loop has no break, so y always equals
    smush_image->rows here and this guard never fires; kept as-is.
  */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
/*
  SmushYGap() is the vertical analog of SmushXGap(): for each column it counts
  trailing transparent pixels at the bottom of the previous (top) image plus
  leading transparent pixels at the top of the current (bottom) image, and
  keeps the minimum over all columns.  Returns that gap minus the offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* The first image in the list has no neighbor above: no gap to close. */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Scan the top image upward from its bottom edge. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Scan the bottom image downward from its top edge. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /*
    NOTE(review): the outer column loop has no break, so x always equals
    smush_image->columns here and this guard never fires; kept as-is.
  */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* Any image with alpha forces the result to carry an alpha channel. */
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Top-to-bottom: widest column wins, heights (plus offsets) sum. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Left-to-right: widths (plus offsets) sum, tallest row wins. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    /* Pull each image toward its neighbor by the measured transparent gap. */
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the actual extent consumed by the smushed images. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) exception;  /* accepted for API symmetry; not used by this method */
  /* Drop all embedded profiles and the comment/date properties. */
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* Ask the PNG encoder to omit ancillary metadata chunks on write. */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,cHRM,EXIF,gAMA,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index: out-of-range values are clamped to 0 and
    flagged through *range_exception (which is never reset here).
  */
  if ((size_t) index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((Quantum) 0);
    }
  return(index);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
/*
Rebuild each pixel's channel values from its colormap index.  Only
meaningful for PseudoClass images: ping images return MagickTrue untouched
and DirectClass images return MagickFalse.  Any out-of-range colormap
index is clamped to 0 (see PushColormapIndex) and reported once, as a
CorruptImageWarning, after the row loop completes.
*/
CacheView
*image_view;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelInfo *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
/* save taint: syncing pixels from the colormap is not a user edit, so the
   original flag is restored after the loop */
taint=image->taint;
image_view=AcquireAuthenticCacheView(image,exception);
/* NOTE(review): range_exception and status are written from multiple
   threads without synchronization; all writers store the same value
   (MagickTrue/MagickFalse), so the race is benign in practice. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(range_exception,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register Quantum
*magick_restrict q;
register ssize_t
x;
/* once any row fails, remaining rows are skipped cheaply */
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
/* restore the taint flag saved above */
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
% Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
% that operations and coders can find such settings. In IMv7 if a desired
% per-image artifact is not set, then it will directly look for a global
% option as a fallback, as such this copy is no longer needed, only the
% link set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
%        Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  /*
    Sync the global image_info settings into every image in the list, then
    delete the one-shot "page" option from image_info so it is consumed
    exactly once.
  */
  Image
    *next;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
    (void) SyncImageSettings(image_info,next,exception);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
/*
Copy each global image_info option, when present, into the corresponding
per-image attribute, then link a clone of image_info to the image so later
artifact lookups can fall back to the global options.  Always returns
MagickTrue.
*/
const char
*option;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->background_color,
exception);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
/* geometry "rho[,sigma]": a missing sigma means y defaults to x */
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->border_color,
exception);
/* FUTURE: do not sync compose to per-image compose setting here */
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
/* -- */
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
/* density "x[,y]": a missing sigma means y defaults to x */
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
/* NOTE(review): flags is assigned here but not checked */
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
/* image_info->quality, when defined, overrides the "quality" option above */
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
/* NOTE(review): flags is assigned here but not checked */
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
/* the "units" option overrides image_info->units when both are present */
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
/* convert the stored resolution to the newly requested units:
   inch -> cm divides by 2.54; cm -> inch multiplies by 2.54 and
   rounds to two decimal places */
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->resolution.x=(double) ((size_t) (100.0*2.54*
image->resolution.x+0.5))/100.0;
image->resolution.y=(double) ((size_t) (100.0*2.54*
image->resolution.y+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"virtual-pixel");
if (option != (const char *) NULL)
(void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
exception);
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/*
Pointer to allow the lookup of pre-image artifact will fallback to a global
option setting/define. This saves a lot of duplication of global options
into per-image artifacts, while ensuring only specifically set per-image
artifacts are preserved when parenthesis ends.
*/
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
image->image_info=CloneImageInfo(image_info);
return(MagickTrue);
}
|
Wind_dir.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/*
* Wind dir - U then V in input file
*
* v 0.1 experimental based on Wind_speed.c
*
* 3/2009: Public Domain: Wesley Ebisuzaki (wind_speed.c)
* 1/2013: Public Domain: Wesley Ebisuzaki (wind_dir.c)
*
*/
extern int decode, file_append, save_translation;
extern unsigned int nx_, ny_;
extern int flush_mode;
extern int use_scale, dec_scale, bin_scale, wanted_bits, max_bits;
extern enum output_grib_type grib_type;
/*
* HEADER:100:wind_dir:output:1:calculate wind direction, X = output gribfile (direction in degrees, 0=wind from north, 90=wind from east)
*/
int f_wind_dir(ARG1) {

    /*
     * Compute wind direction from paired U/V records (U must precede V in
     * the input file) and write the result to the output grib file named
     * by arg1.  The U record is buffered until a V record with matching
     * sections 0/1/3/4 arrives; the pair is then combined into a direction
     * field (degrees, meteorological convention: 0/360 = wind from north,
     * 90 = wind from east) encoded with parameter number 0.
     *
     * Fix vs. original: the mode==99 debug messages said "-wind_speed",
     * a copy/paste remnant from Wind_speed.c; they now say "-wind_dir".
     */

    struct local_struct {
        float *val;                    /* buffered U-component data */
        int has_u;                     /* 1 while a U record awaits its V */
        unsigned char *clone_sec[9];   /* GRIB sections cloned from the U record */
        struct seq_file out;           /* output grib file */
    };
    struct local_struct *save;

    unsigned int i;
    int is_u;
    float *d1, *data_tmp;
    int discipline, mastertab, parmcat, parmnum;

    if (mode == -1) {                           // initialization

        save_translation = decode = 1;

        // allocate static variables
        *local = save = (struct local_struct *) malloc( sizeof(struct local_struct));
        if (save == NULL) fatal_error("memory allocation -wind_dir","");

        if (fopen_file(&(save->out), arg1, file_append ? "ab" : "wb") != 0) {
            free(save);
            fatal_error("Could not open %s", arg1);
        }
        save->has_u = 0;
        init_sec(save->clone_sec);
        return 0;
    }

    save = *local;

    if (mode == -2) {                           // cleanup
        if (save->has_u == 1) {
            // an unmatched U record is still buffered; discard it
            free(save->val);
            free_sec(save->clone_sec);
        }
        fclose_file(&(save->out));
        free(save);
        return 0;
    }

    if (mode >= 0) {                            // processing

        // get variable name parameters
        discipline = GB2_Discipline(sec);
        mastertab = GB2_MasterTable(sec);
        parmcat = GB2_ParmCat(sec);
        parmnum = GB2_ParmNum(sec);

        if (mode == 99) fprintf(stderr,"-wind_dir %d %d %d %d\n",mastertab,discipline,parmcat,parmnum);

        // discipline 0 / category 2 / parameter 2 is the U wind component
        is_u = (mastertab != 255) && (discipline == 0) && (parmcat == 2) && (parmnum == 2);
        if (mode == 99 && is_u) fprintf(stderr,"\n-wind_dir: is u\n");

        if (is_u) {                             // save U data
            if (save->has_u) {
                // a second U before any V replaces the stale buffer
                free(save->val);
                free_sec(save->clone_sec);
            }
            copy_sec(sec, save->clone_sec);
            copy_data(data,ndata,&(save->val));
            GB2_ParmNum(save->clone_sec) = 3;   // set id to V
            save->has_u = 1;
            return 0;
        }

        if (save->has_u == 0) return 0;

        // check for V: must match the buffered U in sections 0, 1, 3 and 4
        if (same_sec0(sec,save->clone_sec) == 1 &&
            same_sec1(sec,save->clone_sec) == 1 &&
            same_sec3(sec,save->clone_sec) == 1 &&
            same_sec4(sec,save->clone_sec) == 1) {

            // check to see if winds are earth relative
            if ( (flag_table_3_3(sec) & 8) != 0 ||
                 (flag_table_3_3(save->clone_sec) & 8) != 0) {
                fprintf(stderr,"wind_dir will not work with grid-relative winds, skipping\n");
                free(save->val);
                free_sec(save->clone_sec);
                save->has_u = 0;
                return 0;
            }

            // calculate wind direction
            if (mode == 99) fprintf(stderr,"\n-wind_dir: calc wind direction\n");

            d1 = save->val;     // d1 = U, data = V
#pragma omp parallel for private(i)
            for (i = 0; i < ndata; i++) {
                if (!UNDEFINED_VAL(data[i]) && !UNDEFINED_VAL(d1[i])) {
                    // atan2(U,V)+180 gives the direction the wind blows FROM
                    d1[i] = (atan2(d1[i],data[i]) * 180.0 / 3.14159265359 + 180.0);
                }
                else d1[i] = UNDEFINED;
            }
            GB2_ParmNum(save->clone_sec) = 0;   // set id to direction degrees

            // copy data to temp space in output order before encoding
            if ((data_tmp = (float *) malloc(sizeof(float) * (size_t) ndata)) == NULL)
                fatal_error("memory allocation - data_tmp","");
            undo_output_order(save->val, data_tmp, ndata);
            grib_wrt(save->clone_sec, data_tmp, ndata, nx_, ny_, use_scale, dec_scale,
                bin_scale, wanted_bits, max_bits, grib_type, &(save->out));
            if (flush_mode) fflush_file(&(save->out));
            free(data_tmp);

            // cleanup
            free(save->val);
            free_sec(save->clone_sec);
            save->has_u = 0;
        }
    }
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.